From db29cdf76cdd4c3732164c99e9a22e9e02bf994f Mon Sep 17 00:00:00 2001 From: Cem Bassoy Date: Mon, 24 May 2021 12:00:38 +0200 Subject: [PATCH 1/2] Features/tensor refactor (#115) --- .appveyor.yml | 74 -- .clang-tidy | 8 +- .github/workflows/address_san.yml | 75 -- .../workflows/{apple_clang.yml => apple.yml} | 41 +- .../{clangtidy_review.yml => clangtidy.yml} | 51 +- .github/workflows/clangtidy_check.yml | 47 - .github/workflows/code_format.yml | 12 +- .github/workflows/linux.yml | 99 ++ .github/workflows/linux_clang.yml | 97 -- .github/workflows/linux_gcc.yml | 99 -- .github/workflows/sanitizer.yml | 83 ++ .github/workflows/thread_san.yml | 76 -- .github/workflows/ub_san.yml | 76 -- .../{windows_msvc.yml => windows.yml} | 29 +- .travis.yml | 90 -- IDEs/qtcreator/examples/configuration.pri | 27 +- .../examples/tensor/example_access_tensor.pro | 7 - .../tensor/example_instantiate_tensor.pro | 7 - ...ple_multiply_tensors_einstein_notation.pro | 7 - ...mple_multiply_tensors_product_function.pro | 7 - .../tensor/example_simple_expressions.pro | 7 - IDEs/qtcreator/examples/tensor/tensor.pro | 25 +- IDEs/qtcreator/include/include.pro | 6 +- IDEs/qtcreator/include/tensor/tensor.pri | 58 +- IDEs/qtcreator/test/test_tensor.pro | 60 +- IDEs/qtcreator/tests.pri | 140 +-- IDEs/qtcreator/ublas_develop.pro | 4 +- README.md | 77 +- examples/tensor/.clang-tidy | 11 - examples/tensor/access_tensor.cpp | 179 +-- examples/tensor/instantiate_tensor.cpp | 108 +- .../multiply_tensors_einstein_notation.cpp | 277 +++-- .../multiply_tensors_product_function.cpp | 176 +-- examples/tensor/simple_expressions.cpp | 32 +- include/boost/numeric/ublas/tensor.hpp | 3 +- .../boost/numeric/ublas/tensor/algorithms.hpp | 357 +++--- .../boost/numeric/ublas/tensor/concepts.hpp | 34 + .../numeric/ublas/tensor/dynamic_extents.hpp | 241 ---- .../numeric/ublas/tensor/dynamic_strides.hpp | 219 ---- .../boost/numeric/ublas/tensor/expression.hpp | 48 +- .../ublas/tensor/expression_evaluation.hpp | 98 +- .../boost/numeric/ublas/tensor/extents.hpp | 53 + .../ublas/tensor/extents/extents_base.hpp | 54 + .../tensor/extents/extents_dynamic_size.hpp | 154 +++ .../tensor/extents/extents_functions.hpp | 247 ++++ .../ublas/tensor/extents/extents_static.hpp | 78 ++ .../extents/extents_static_functions.hpp | 637 ++++++++++ .../tensor/extents/extents_static_size.hpp | 148 +++ .../ublas/tensor/extents_functions.hpp | 449 ------- .../ublas/tensor/fixed_rank_extents.hpp | 248 ---- .../ublas/tensor/fixed_rank_strides.hpp | 180 ++- .../numeric/ublas/tensor/function/conj.hpp | 81 ++ .../numeric/ublas/tensor/function/imag.hpp | 82 ++ .../numeric/ublas/tensor/function/init.hpp | 120 ++ .../ublas/tensor/function/inner_prod.hpp | 68 + .../numeric/ublas/tensor/function/norm.hpp | 60 + .../ublas/tensor/function/outer_prod.hpp | 283 +++++ .../numeric/ublas/tensor/function/real.hpp | 80 ++ .../numeric/ublas/tensor/function/reshape.hpp | 87 ++ .../tensor/function/tensor_times_matrix.hpp | 256 ++++ .../tensor/function/tensor_times_tensor.hpp | 337 +++++ .../tensor/function/tensor_times_vector.hpp | 240 ++++ .../numeric/ublas/tensor/function/trans.hpp | 78 ++ .../boost/numeric/ublas/tensor/functions.hpp | 1108 +---------------- include/boost/numeric/ublas/tensor/index.hpp | 14 +- .../numeric/ublas/tensor/index_functions.hpp | 64 + .../numeric/ublas/tensor/multi_index.hpp | 66 +- .../ublas/tensor/multi_index_utility.hpp | 136 +- .../numeric/ublas/tensor/multiplication.hpp | 771 ++++++------ .../ublas/tensor/operators_arithmetic.hpp | 365 +++--- 
.../ublas/tensor/operators_comparison.hpp | 51 +- .../boost/numeric/ublas/tensor/ostream.hpp | 144 +-- .../numeric/ublas/tensor/static_extents.hpp | 150 --- .../numeric/ublas/tensor/static_strides.hpp | 267 ---- .../boost/numeric/ublas/tensor/strides.hpp | 99 -- include/boost/numeric/ublas/tensor/tags.hpp | 14 +- include/boost/numeric/ublas/tensor/tensor.hpp | 53 +- .../ublas/tensor/tensor/tensor_core.hpp | 27 + .../ublas/tensor/tensor/tensor_dynamic.hpp | 466 +++++++ .../ublas/tensor/tensor/tensor_engine.hpp | 29 + .../ublas/tensor/tensor/tensor_static.hpp | 456 +++++++ .../tensor/tensor/tensor_static_rank.hpp | 473 +++++++ .../numeric/ublas/tensor/tensor_core.hpp | 886 ------------- .../numeric/ublas/tensor/tensor_engine.hpp | 50 - .../ublas/tensor/traits/basic_type_traits.hpp | 53 +- .../ublas/tensor/traits/storage_traits.hpp | 105 +- .../traits/type_traits_dynamic_extents.hpp | 46 - .../traits/type_traits_dynamic_strides.hpp | 47 - .../tensor/traits/type_traits_extents.hpp | 41 - .../traits/type_traits_fixed_rank_extents.hpp | 45 - .../traits/type_traits_fixed_rank_strides.hpp | 46 - .../traits/type_traits_static_extents.hpp | 37 - .../traits/type_traits_static_strides.hpp | 46 - .../tensor/traits/type_traits_strides.hpp | 44 - .../tensor/traits/type_traits_tensor.hpp | 41 - .../numeric/ublas/tensor/type_traits.hpp | 11 +- test/tensor/Jamfile | 54 +- test/tensor/test_algorithms.cpp | 886 ++++++------- test/tensor/test_einstein_notation.cpp | 168 +-- test/tensor/test_expression.cpp | 159 +-- test/tensor/test_expression_evaluation.cpp | 156 ++- test/tensor/test_extents.cpp | 731 ----------- test/tensor/test_extents_dynamic.cpp | 190 +++ .../test_extents_dynamic_rank_static.cpp | 155 +++ test/tensor/test_extents_functions.cpp | 634 ++++++++++ .../test_fixed_rank_expression_evaluation.cpp | 349 +++--- test/tensor/test_fixed_rank_extents.cpp | 1027 ++++++++------- test/tensor/test_fixed_rank_functions.cpp | 679 +++++----- .../test_fixed_rank_operators_arithmetic.cpp | 277 ++--- .../test_fixed_rank_operators_comparison.cpp | 268 ++-- test/tensor/test_fixed_rank_strides.cpp | 281 +++-- test/tensor/test_fixed_rank_tensor.cpp | 759 +++++------ .../test_fixed_rank_tensor_matrix_vector.cpp | 637 +++++----- test/tensor/test_functions.cpp | 146 ++- test/tensor/test_multi_index.cpp | 107 +- test/tensor/test_multi_index_utility.cpp | 396 +++--- test/tensor/test_multiplication.cpp | 595 +++++---- test/tensor/test_operators_arithmetic.cpp | 48 +- test/tensor/test_operators_comparison.cpp | 394 +++--- .../test_static_expression_evaluation.cpp | 64 +- test/tensor/test_static_extents.cpp | 691 +++++----- .../test_static_operators_arithmetic.cpp | 34 +- .../test_static_operators_comparison.cpp | 35 +- test/tensor/test_static_strides.cpp | 251 ++-- test/tensor/test_static_tensor.cpp | 152 ++- .../test_static_tensor_matrix_vector.cpp | 415 +++--- test/tensor/test_strides.cpp | 252 ++-- test/tensor/test_tensor.cpp | 124 +- test/tensor/test_tensor_matrix_vector.cpp | 320 ++--- test/tensor/utility.hpp | 67 +- 130 files changed, 12540 insertions(+), 12404 deletions(-) delete mode 100644 .appveyor.yml delete mode 100644 .github/workflows/address_san.yml rename .github/workflows/{apple_clang.yml => apple.yml} (69%) rename .github/workflows/{clangtidy_review.yml => clangtidy.yml} (53%) delete mode 100644 .github/workflows/clangtidy_check.yml create mode 100644 .github/workflows/linux.yml delete mode 100644 .github/workflows/linux_clang.yml delete mode 100644 .github/workflows/linux_gcc.yml create mode 100644 
.github/workflows/sanitizer.yml delete mode 100644 .github/workflows/thread_san.yml delete mode 100644 .github/workflows/ub_san.yml rename .github/workflows/{windows_msvc.yml => windows.yml} (77%) delete mode 100644 .travis.yml delete mode 100644 IDEs/qtcreator/examples/tensor/example_access_tensor.pro delete mode 100644 IDEs/qtcreator/examples/tensor/example_instantiate_tensor.pro delete mode 100644 IDEs/qtcreator/examples/tensor/example_multiply_tensors_einstein_notation.pro delete mode 100644 IDEs/qtcreator/examples/tensor/example_multiply_tensors_product_function.pro delete mode 100644 IDEs/qtcreator/examples/tensor/example_simple_expressions.pro delete mode 100644 examples/tensor/.clang-tidy create mode 100644 include/boost/numeric/ublas/tensor/concepts.hpp delete mode 100644 include/boost/numeric/ublas/tensor/dynamic_extents.hpp delete mode 100644 include/boost/numeric/ublas/tensor/dynamic_strides.hpp create mode 100644 include/boost/numeric/ublas/tensor/extents.hpp create mode 100644 include/boost/numeric/ublas/tensor/extents/extents_base.hpp create mode 100644 include/boost/numeric/ublas/tensor/extents/extents_dynamic_size.hpp create mode 100644 include/boost/numeric/ublas/tensor/extents/extents_functions.hpp create mode 100644 include/boost/numeric/ublas/tensor/extents/extents_static.hpp create mode 100644 include/boost/numeric/ublas/tensor/extents/extents_static_functions.hpp create mode 100644 include/boost/numeric/ublas/tensor/extents/extents_static_size.hpp delete mode 100644 include/boost/numeric/ublas/tensor/extents_functions.hpp delete mode 100644 include/boost/numeric/ublas/tensor/fixed_rank_extents.hpp create mode 100644 include/boost/numeric/ublas/tensor/function/conj.hpp create mode 100644 include/boost/numeric/ublas/tensor/function/imag.hpp create mode 100644 include/boost/numeric/ublas/tensor/function/init.hpp create mode 100644 include/boost/numeric/ublas/tensor/function/inner_prod.hpp create mode 100644 include/boost/numeric/ublas/tensor/function/norm.hpp create mode 100644 include/boost/numeric/ublas/tensor/function/outer_prod.hpp create mode 100644 include/boost/numeric/ublas/tensor/function/real.hpp create mode 100644 include/boost/numeric/ublas/tensor/function/reshape.hpp create mode 100644 include/boost/numeric/ublas/tensor/function/tensor_times_matrix.hpp create mode 100644 include/boost/numeric/ublas/tensor/function/tensor_times_tensor.hpp create mode 100644 include/boost/numeric/ublas/tensor/function/tensor_times_vector.hpp create mode 100644 include/boost/numeric/ublas/tensor/function/trans.hpp create mode 100644 include/boost/numeric/ublas/tensor/index_functions.hpp delete mode 100644 include/boost/numeric/ublas/tensor/static_extents.hpp delete mode 100644 include/boost/numeric/ublas/tensor/static_strides.hpp delete mode 100644 include/boost/numeric/ublas/tensor/strides.hpp create mode 100644 include/boost/numeric/ublas/tensor/tensor/tensor_core.hpp create mode 100644 include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp create mode 100644 include/boost/numeric/ublas/tensor/tensor/tensor_engine.hpp create mode 100644 include/boost/numeric/ublas/tensor/tensor/tensor_static.hpp create mode 100644 include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp delete mode 100644 include/boost/numeric/ublas/tensor/tensor_core.hpp delete mode 100644 include/boost/numeric/ublas/tensor/tensor_engine.hpp delete mode 100644 include/boost/numeric/ublas/tensor/traits/type_traits_dynamic_extents.hpp delete mode 100644 
include/boost/numeric/ublas/tensor/traits/type_traits_dynamic_strides.hpp delete mode 100644 include/boost/numeric/ublas/tensor/traits/type_traits_extents.hpp delete mode 100644 include/boost/numeric/ublas/tensor/traits/type_traits_fixed_rank_extents.hpp delete mode 100644 include/boost/numeric/ublas/tensor/traits/type_traits_fixed_rank_strides.hpp delete mode 100644 include/boost/numeric/ublas/tensor/traits/type_traits_static_extents.hpp delete mode 100644 include/boost/numeric/ublas/tensor/traits/type_traits_static_strides.hpp delete mode 100644 include/boost/numeric/ublas/tensor/traits/type_traits_strides.hpp delete mode 100644 include/boost/numeric/ublas/tensor/traits/type_traits_tensor.hpp delete mode 100644 test/tensor/test_extents.cpp create mode 100644 test/tensor/test_extents_dynamic.cpp create mode 100644 test/tensor/test_extents_dynamic_rank_static.cpp create mode 100644 test/tensor/test_extents_functions.cpp diff --git a/.appveyor.yml b/.appveyor.yml deleted file mode 100644 index 2b2d8fd53..000000000 --- a/.appveyor.yml +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright 2018 Stefan Seefeld -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or copy at http://boost.org/LICENSE_1_0.txt) - -version: 1.0.{build}-{branch} - -shallow_clone: true - -branches: - only: - - master - - develop - - /feature\/.*/ - -environment: - matrix: - - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019 - TOOLSET: msvc-14.2 - CXXSTD: latest - - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019 - TOOLSET: msvc-14.2 - CXXSTD: 17 - - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015 - TOOLSET: msvc-14.0 - CXXSTD: 11 - -install: - - cd "C:\Tools\vcpkg" - - git pull - - .\bootstrap-vcpkg.bat - - cd %appveyor_build_folder% - # Install OpenCL runtime (driver) for Intel / Xeon package - - appveyor DownloadFile "http://registrationcenter-download.intel.com/akdlm/irc_nas/9022/opencl_runtime_16.1.1_x64_setup.msi" - - start /wait msiexec /i opencl_runtime_16.1.1_x64_setup.msi /qn /l*v msiexec2.log - # FIXME: To be removed https://help.appveyor.com/discussions/problems/13000-cmake_toolchain_filevcpkgcmake-conflicts-with-cmake-native-findboostcmake" - - ps: 'Write-Host "Installing latest vcpkg.cmake module" -ForegroundColor Magenta' - - appveyor DownloadFile https://raw.githubusercontent.com/Microsoft/vcpkg/master/scripts/buildsystems/vcpkg.cmake -FileName "c:\tools\vcpkg\scripts\buildsystems\vcpkg.cmake" - - set "TRIPLET=x64-windows" - - vcpkg --triplet %TRIPLET% install opencl clblas - - set PATH=C:\Tools\vcpkg\installed\%TRIPLET%\bin;%PATH% - - set VCPKG_I=C:\Tools\vcpkg\installed\%TRIPLET%\include - - set VCPKG_L=C:\Tools\vcpkg\installed\%TRIPLET%\lib - - set BOOST_BRANCH=develop - - if "%APPVEYOR_REPO_BRANCH%" == "master" set BOOST_BRANCH=master - - cd .. 
- - git clone -b %BOOST_BRANCH% https://github.com/boostorg/boost.git boost-root - - cd boost-root - - git submodule update --init tools/build - - git submodule update --init libs/config - - git submodule update --init tools/boostdep - - xcopy /s /e /q %APPVEYOR_BUILD_FOLDER% libs\numeric\ublas - - python tools/boostdep/depinst/depinst.py -I benchmarks numeric/ublas - - xcopy %APPVEYOR_BUILD_FOLDER%\opencl.jam %USERPROFILE% - - xcopy %APPVEYOR_BUILD_FOLDER%\clblas.jam %USERPROFILE% - - ps: | - # Creating %USERPROFILE%/user-config.jam file - @' - import os regex toolset ; - local toolset = [ regex.split [ os.environ TOOLSET ] "-" ] ; - local vcpkg_i = [ os.environ VCPKG_I ] ; - local vcpkg_l = [ os.environ VCPKG_L ] ; - using $(toolset[1]) : $(toolset[2-]:J="-") : ; - using opencl : : $(vcpkg_i) $(vcpkg_l) ; - using clblas : : $(vcpkg_i) $(vcpkg_l) ; - '@ | sc "$env:USERPROFILE/user-config.jam" - - cmd /c bootstrap - - b2 -j3 headers - -build: off - -test_script: - - if not "%CXXSTD%" == "" set CXXSTD=cxxstd=%CXXSTD% - - set ADDRMD=address-model=64 - - b2 -j3 libs/numeric/ublas/test toolset=%TOOLSET% %CXXSTD% %ADDRMD% \ No newline at end of file diff --git a/.clang-tidy b/.clang-tidy index e17df9a0e..4a0edffcd 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -1,6 +1,6 @@ --- -Checks: '-*,modernize-*,cppcoreguidelines-*,openmp-*,bugprone-*,performance-*,portability-*,readability-*,-modernize-use-trailing-return-type,-cppcoreguidelines-pro-bounds-pointer-arithmetic,-readability-uppercase-literal-suffix,-readability-braces-around-statements' -WarningsAsErrors: '-*,modernize-*,cppcoreguidelines-*,openmp-*,bugprone-*,performance-*,portability-*,readability-*,-modernize-use-trailing-return-type,-cppcoreguidelines-pro-bounds-pointer-arithmetic,-readability-uppercase-literal-suffix,-readability-braces-around-statements' +Checks: '-*,clang-*,bugprone-*,cppcoreguidelines-*,google-*,hicpp-*,modernize-*,performance-*,readability-*,portability-*, +-modernize-use-trailing-return-type, -readability-uppercase-literal-suffix, -readability-braces-around-statements, -hicpp-uppercase-literal-suffix, -hicpp-braces-around-statements, -hicpp-no-array-decay, -cppcoreguidelines-pro-bounds-constant-array-index, -cppcoreguidelines-pro-bounds-pointer-arithmetic, -cppcoreguidelines-pro-bounds-array-to-pointer-decay, -readability-avoid-const-params-in-decls, -google-readability-braces-around-statements,-google-explicit-constructor,-hicpp-vararg,-cppcoreguidelines-pro-type-vararg, -cppcoreguidelines-avoid-non-const-global-variables, -google-readability-todo, -cppcoreguidelines-pro-type-member-init, -hicpp-member-init, -cppcoreguidelines-special-member-functions, -hicpp-special-member-functions' HeaderFilterRegex: 'boost\/numeric\/ublas\/tensor\/.*' AnalyzeTemporaryDtors: false FormatStyle: file @@ -8,4 +8,8 @@ User: ublas-developers CheckOptions: - key: modernize-use-nullptr.NullMacros value: 'NULL' + - key: readability-magic-numbers.IgnoredIntegerValues + value: '1;2;3;4;5;6;7;8;9;' + - key: cppcoreguidelines-avoid-magic-numbers.IgnoredIntegerValues + value: '1;2;3;4;5;6;7;8;9;' ... 
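The .clang-tidy change above adds the literals 1 through 9 to the IgnoredIntegerValues option of the readability-magic-numbers and cppcoreguidelines-avoid-magic-numbers checks, so small dimension and extent literals in the tensor tests and examples no longer need to be named constants to pass static analysis. A minimal sketch of the effect follows; the file name and identifiers are hypothetical and not part of this patch, and plain standard C++ is used rather than the uBlas API:

    // magic_numbers_example.cpp -- hypothetical illustration of the new
    // .clang-tidy CheckOptions above; not part of this patch.
    #include <cstddef>
    #include <vector>

    int main()
    {
      // Single-digit extent literals are now on the checks' ignore list,
      // so clang-tidy accepts them without requiring named constants.
      std::vector<std::size_t> extents{5, 6, 7};

      // Larger literals are not whitelisted; naming them keeps the
      // magic-number checks quiet.
      constexpr std::size_t large_extent = 42;
      extents.push_back(large_extent);

      return static_cast<int>(extents.size());
    }
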
diff --git a/.github/workflows/address_san.yml b/.github/workflows/address_san.yml deleted file mode 100644 index 5cf0c85db..000000000 --- a/.github/workflows/address_san.yml +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright (c) 2020 Mohammad Ashar Khan -# Distributed under Boost Software License, Version 1.0 -# (See accompanying file LICENSE_1_0.txt or copy at http://boost.org/LICENSE_1_0.txt) - -name: Address Sanitizer -# Trigger on Push to the repository, regardless of the branch. -# For fine tune, You can add specific branches or tags. -on: - push: - paths-ignore: - - '**.md' - - 'doc/**' - pull_request: - paths-ignore: - - '**.md' - - 'doc/**' -jobs: - build: - name: "${{matrix.config.cxx}} -std=c++${{matrix.config.cxxstd}}" - runs-on: ubuntu-20.04 - strategy: - fail-fast: false - # If any compiler fails to compile, continue CI for next compiler in matrix instead of failing early - matrix: - config: - - {cc: gcc-9, cxx: g++-9, cxxstd: 2a, name: gcc} - - {cc: clang-9, cxx: clang++-9, cxxstd: 2a, name: clang} - steps: - - uses: actions/checkout@v2 - - - name: Prepare BOOST_ROOT - run: | - cd ${GITHUB_WORKSPACE} - cd .. - - git clone -b master --depth 1 https://github.com/boostorg/boost.git boost-root - cd boost-root - - git submodule update --init --depth=1 --jobs 8 tools/build - git submodule update --init --depth=1 --jobs 8 libs/config - git submodule update --init --depth=1 --jobs 8 tools/boostdep - - mkdir -p libs/numeric/ - cp -rp ${GITHUB_WORKSPACE}/. libs/numeric/ublas - python tools/boostdep/depinst/depinst.py -g " --depth=1" -I benchmarks numeric/ublas - - ./bootstrap.sh - ./b2 -j 8 headers - - #echo ::set-env name=BOOST_ROOT::${PWD} - echo "BOOST_ROOT=${PWD}" >> $GITHUB_ENV - - - name: Prepare Build - run: | - echo $BOOST_ROOT - cd $BOOST_ROOT - echo "using ${{matrix.config.name}} : : ${{ matrix.config.cxx }} ;" >> ~/user-config.jam; - - - name: Test Benchmarks - run: | - cd $BOOST_ROOT - cd libs/numeric/ublas - $BOOST_ROOT/b2 -j 4 benchmarks toolset=${{matrix.config.name}} cxxstd=${{matrix.config.cxxstd}} - - - name: Test Tensor Examples - run: | - cd $BOOST_ROOT - cd libs/numeric/ublas - $BOOST_ROOT/b2 -j 4 examples/tensor toolset=${{matrix.config.name}} cxxstd=${{matrix.config.cxxstd}} cxxflags="-g -fsanitize=address -fno-omit-frame-pointer" linkflags="-fsanitize=address" - - - name: Test Tensor - run: | - cd $BOOST_ROOT - cd libs/numeric/ublas - $BOOST_ROOT/b2 -j 4 test/tensor toolset=${{matrix.config.name}} cxxstd=${{matrix.config.cxxstd}} cxxflags="-g -fsanitize=address -fno-omit-frame-pointer" linkflags="-fsanitize=address" diff --git a/.github/workflows/apple_clang.yml b/.github/workflows/apple.yml similarity index 69% rename from .github/workflows/apple_clang.yml rename to .github/workflows/apple.yml index dfaa5d470..f0c8df792 100644 --- a/.github/workflows/apple_clang.yml +++ b/.github/workflows/apple.yml @@ -1,8 +1,9 @@ # Copyright (c) 2020 Mohammad Ashar Khan +# Copyright (c) 2021 Cem Bassoy # Distributed under Boost Software License, Version 1.0 # (See accompanying file LICENSE_1_0.txt or copy at http://boost.org/LICENSE_1_0.txt) -name: Apple Clang +name: "Apple MacOS" on: push: @@ -15,24 +16,26 @@ on: - 'doc/**' jobs: build: - name: "Darwin 11.0 -std=c++${{matrix.cxxstd}}" - runs-on: macos-latest + name: "MacOS 10.15 clang -std=c++${{matrix.cxxstd}}" + runs-on: macos-10.15 strategy: - fail-fast: false + fail-fast: true matrix: - cxxstd: [11, 17, 2a] + cxxstd: [2a] steps: - uses: actions/checkout@v2 - - name: Prepare BOOST_ROOT + - name: Git Clone Boost.uBlas run: | cd 
${GITHUB_WORKSPACE} cd .. git clone -b master --depth 1 https://github.com/boostorg/boost.git boost-root cd boost-root - + echo "BOOST_ROOT=${PWD}" >> ${GITHUB_ENV} + echo $BOOST_ROOT + git submodule update --init --depth=1 --jobs 8 tools/build git submodule update --init --depth=1 --jobs 8 libs/config git submodule update --init --depth=1 --jobs 8 tools/boostdep @@ -41,33 +44,29 @@ jobs: cp -rp ${GITHUB_WORKSPACE}/. libs/numeric/ublas python tools/boostdep/depinst/depinst.py -g " --depth=1" -I benchmarks numeric/ublas - ./bootstrap.sh - ./b2 -j 8 headers - - #echo ::set-env name=BOOST_ROOT::${PWD} - echo "BOOST_ROOT=${PWD}" >> ${GITHUB_ENV} - - - name: Prepare Build + - name: Bootstrap and Compile Boost.uBlas run: | - echo $BOOST_ROOT cd $BOOST_ROOT - - echo "using clang : : clang++ ;" >> ~/user-config.jam; - + ./bootstrap.sh + ./b2 -j8 headers + echo "using clang : : $(brew --prefix llvm)/bin/clang++ ;" >> ~/user-config.jam; + + - name: Test Benchmarks run: | cd $BOOST_ROOT cd libs/numeric/ublas - $BOOST_ROOT/b2 -j 4 benchmarks toolset=clang cxxstd=${{matrix.cxxstd}} + $BOOST_ROOT/b2 -j8 benchmarks cxxstd=${{matrix.cxxstd}} - name: Test Tensor Examples run: | cd $BOOST_ROOT cd libs/numeric/ublas - $BOOST_ROOT/b2 -j 4 examples/tensor toolset=clang cxxstd=${{matrix.cxxstd}} + $BOOST_ROOT/b2 -j8 examples/tensor cxxstd=${{matrix.cxxstd}} - name: Test Tensor run: | cd $BOOST_ROOT cd libs/numeric/ublas - $BOOST_ROOT/b2 -j 4 test/tensor toolset=clang cxxstd=${{matrix.cxxstd}} + $BOOST_ROOT/b2 -j8 test/tensor cxxstd=${{matrix.cxxstd}} + diff --git a/.github/workflows/clangtidy_review.yml b/.github/workflows/clangtidy.yml similarity index 53% rename from .github/workflows/clangtidy_review.yml rename to .github/workflows/clangtidy.yml index b14c5bb9f..11b1aea41 100644 --- a/.github/workflows/clangtidy_review.yml +++ b/.github/workflows/clangtidy.yml @@ -1,26 +1,30 @@ # Copyright (c) 2020 Mohammad Ashar Khan +# Copyright (c) 2021 Cem Bassoy # Distributed under Boost Software License, Version 1.0 # (See accompanying file LICENSE_1_0.txt or copy at http://boost.org/LICENSE_1_0.txt) -name: Clang tidy Review - -on: [pull_request] - +name: "Static Analysis" + +on: + push: + paths-ignore: + - '**.md' + - 'doc/**' + pull_request: + paths-ignore: + - '**.md' + - 'doc/**' jobs: check: - name: Clang tidy Review + name: Linux Clang-Tidy runs-on: ubuntu-20.04 - if: github.event.pull_request.head.repo.full_name == github.repository steps: - - uses: actions/checkout@v2 - - name: "Install dependencies" - run: | - wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key 2>/dev/null | sudo apt-key add - - sudo add-apt-repository 'deb http://apt.llvm.org/focal llvm-toolchain-focal-10 main' -y - sudo apt-get update -q - sudo apt-get install -y clang-10 clang-tidy-10 + - uses: actions/checkout@v2 + + - name: Install Clang 11 + run: sudo apt-get update && sudo apt-get install -y clang-11 clang-tidy-11 - - name: "Install Boost from Source" + - name: "Install Boost from Source" run: | cd .. 
git clone --depth 1 https://github.com/boostorg/boost.git --recursive --shallow-submodules @@ -29,19 +33,20 @@ jobs: ./b2 headers sudo cp -r -L boost /usr/include rm -rf boost + + - name: "Run Clang-Tidy" + run: clang-tidy-11 examples/tensor/*.cpp test/tensor/*.cpp -- -Iinclude -std=c++20 > reports.txt + - name: "Print Clang-Tidy Report" + run: cat reports.txt + - uses: actions/setup-python@v2 - - - name: "Run clang-tidy check" - run: | - clang-tidy-10 examples/tensor/*.cpp -- -Iinclude -std=c++17 > reports.txt - + - name: "Post review comments" - if: always() + if: github.event.pull_request.head.repo.full_name == github.repository run: | pip3 install 'unidiff~=0.6.0' --force-reinstall pip3 install 'PyGithub~=1.51' --force-reinstall pip3 install 'requests~=2.23' --force-reinstall - - python ./.ci/post_review.py --repository ${{ github.repository }} --token ${{ github.token}} --pr ${{ github.event.pull_request.number }} --path reports.txt - + python ./.ci/post_review.py --repository ${{ github.repository }} --token ${{ github.token}} --pr ${{ github.event.pull_request.number }} --path reports.txt + diff --git a/.github/workflows/clangtidy_check.yml b/.github/workflows/clangtidy_check.yml deleted file mode 100644 index 1b5236352..000000000 --- a/.github/workflows/clangtidy_check.yml +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (c) 2020 Mohammad Ashar Khan -# Distributed under Boost Software License, Version 1.0 -# (See accompanying file LICENSE_1_0.txt or copy at http://boost.org/LICENSE_1_0.txt) - -name: Clang tidy checks - -on: - push: - paths-ignore: - - '**.md' - - 'doc/**' - pull_request: - paths-ignore: - - '**.md' - - 'doc/**' -jobs: - check: - name: Clang tidy Check - runs-on: ubuntu-20.04 - steps: - - uses: actions/checkout@v2 - - - name: "Install dependencies" - run: | - wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key 2>/dev/null | sudo apt-key add - - sudo add-apt-repository 'deb http://apt.llvm.org/focal llvm-toolchain-focal-10 main' -y - sudo apt-get update -q - sudo apt-get install -y clang-10 clang-tidy-10 - - - name: "Install Boost from Source" - run: | - cd .. 
- git clone --depth 1 https://github.com/boostorg/boost.git --recursive --shallow-submodules - cd boost - ./bootstrap.sh - ./b2 headers - sudo cp -r -L boost /usr/include - rm -rf boost - - - name: "Run clang-tidy check" - run: | - clang-tidy-10 examples/tensor/*.cpp -- -Iinclude -std=c++17 > reports.txt - - - name: "Show clang tidy report" - if: always() - run: | - cat reports.txt diff --git a/.github/workflows/code_format.yml b/.github/workflows/code_format.yml index f1ba49372..a7c53648b 100644 --- a/.github/workflows/code_format.yml +++ b/.github/workflows/code_format.yml @@ -2,7 +2,7 @@ # Distributed under Boost Software License, Version 1.0 # (See accompanying file LICENSE_1_0.txt or copy at http://boost.org/LICENSE_1_0.txt) -name: "Code Format" +name: "Clang Code Format" on: push: @@ -15,7 +15,7 @@ on: - 'doc/**' jobs: format: - name: "Code Formatting Check" + name: "Clang Code Formatting Check" runs-on: ubuntu-20.04 steps: - uses: actions/checkout@v2 @@ -24,8 +24,8 @@ jobs: run: | sudo apt-get install -y clang-format-10 - - name: "Format Codes" - run: clang-format-10 -i examples/tensor/*.cpp test/tensor/*.cpp include/boost/numeric/ublas/tensor/*.hpp include/boost/numeric/ublas/tensor/*/*.hpp +# - name: "Format Codes" +# run: clang-format-10 -i examples/tensor/*.cpp test/tensor/*.cpp include/boost/numeric/ublas/tensor/*.hpp include/boost/numeric/ublas/tensor/*/*.hpp - - name: Check diff - run: git diff --exit-code HEAD +# - name: Check diff +# run: git diff --exit-code HEAD diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml new file mode 100644 index 000000000..20b9c5311 --- /dev/null +++ b/.github/workflows/linux.yml @@ -0,0 +1,99 @@ +# Copyright (c) 2020 Mohammad Ashar Khan +# Copyright (c) 2021 Cem Bassoy +# Distributed under Boost Software License, Version 1.0 +# (See accompanying file LICENSE_1_0.txt or copy at http://boost.org/LICENSE_1_0.txt) + +name: "Linux" + +on: + push: + paths-ignore: + - '**.md' + - 'doc/**' + pull_request: + paths-ignore: + - '**.md' + - 'doc/**' + +jobs: + build: + name: Ubuntu 20.04 "cxx=${{matrix.config.cxx}}, std=c++${{matrix.config.cxxstd}}, variant=c++${{matrix.config.variant}}" + runs-on: ubuntu-20.04 + strategy: + fail-fast: true + # If any compiler fails to compile, continue CI for next compiler in matrix instead of failing early + matrix: + config: + - { name: clang, cc: clang-10, cxx: clang++-10, cxxstd: 20, variant: debug, opt: off} + - { name: clang, cc: clang-11, cxx: clang++-11, cxxstd: 20, variant: debug, opt: off} + - { name: clang, cc: clang-10, cxx: clang++-10, cxxstd: 20, variant: release, opt: speed, cxxflags: -fopenmp, ldflags: -lgomp=libomp5} + - { name: clang, cc: clang-11, cxx: clang++-11, cxxstd: 20, variant: release, opt: speed, cxxflags: -fopenmp, ldflags: -lgomp=libomp5} + - { name: gcc, cc: gcc-10, cxx: g++-10, cxxstd: 20, variant: debug, opt: off} + - { name: gcc, cc: gcc-10, cxx: g++-10, cxxstd: 20, variant: release, opt: speed, cxxflags: -fopenmp, ldflags: -lgomp} + + steps: + - uses: actions/checkout@v2 + + - name: Install g++-10 + if: matrix.config.cxx == 'g++-10' + run: sudo apt update && sudo apt-get install -y g++-10 libomp-dev + + - name: Install Clang 10 + if: matrix.config.cxx == 'clang++-10' + run: sudo apt-get update && sudo apt-get install -y clang-10 + + - name: Install Clang 11 + if: matrix.config.cxx == 'clang++-11' + run: sudo apt-get update && sudo apt-get install -y clang-11 + + - name: Git Clone Boost.uBlas + run: | + cd ${GITHUB_WORKSPACE} + cd .. 
+ + git clone -b master --depth 1 https://github.com/boostorg/boost.git boost-root + cd boost-root + echo "BOOST_ROOT=${PWD}" >> ${GITHUB_ENV} + echo $BOOST_ROOT + + git submodule update --init --depth=1 --jobs 8 tools/build + git submodule update --init --depth=1 --jobs 8 libs/config + git submodule update --init --depth=1 --jobs 8 tools/boostdep + + mkdir -p libs/numeric/ + cp -rp ${GITHUB_WORKSPACE}/. libs/numeric/ublas + python tools/boostdep/depinst/depinst.py -g " --depth=1" -I benchmarks numeric/ublas + + - name: Bootstrap Boost and Compile Boost + run: | + cd $BOOST_ROOT + ./bootstrap.sh + ./b2 -j8 headers + echo "using ${{ matrix.config.name }} : : ${{ matrix.config.cxx }} ;" >> ~/user-config.jam; + +# - name: Test Benchmarks +# run: | +# cd $BOOST_ROOT +# cd libs/numeric/ublas +# $BOOST_ROOT/b2 -j 4 benchmarks toolset=clang cxxstd=${{matrix.config.cxxstd}} cxxflags="-O3" + + - name: Test Tensor Examples + run: | + cd $BOOST_ROOT/libs/numeric/ublas + if [ -z "$cxxflags" ] + then + $BOOST_ROOT/b2 -j8 examples/tensor toolset=${{matrix.config.name}} cxxstd=${{matrix.config.cxxstd}} variant=${{matrix.config.variant}} optimization=${{matrix.config.opt}} + else + $BOOST_ROOT/b2 -j8 examples/tensor toolset=${{matrix.config.name}} cxxstd=${{matrix.config.cxxstd}} variant=${{matrix.config.variant}} optimization=${{matrix.config.opt}} cxxflags="${{matrix.config.cxxflags}}" linkflags="${{matrix.config.ldflags}}" + fi + + - name: Test Tensor Unit-Tests + run: | + cd $BOOST_ROOT/libs/numeric/ublas + if [ -z "$cxxflags" ] + then + $BOOST_ROOT/b2 -j8 test/tensor toolset=${{matrix.config.name}} cxxstd=${{matrix.config.cxxstd}} variant=${{matrix.config.variant}} optimization=${{matrix.config.opt}} + else + $BOOST_ROOT/b2 -j8 test/tensor toolset=${{matrix.config.name}} cxxstd=${{matrix.config.cxxstd}} variant=${{matrix.config.variant}} optimization=${{matrix.config.opt}} cxxflags="${{matrix.config.cxxflags}}" linkflags="${{matrix.config.ldflags}}" + fi + diff --git a/.github/workflows/linux_clang.yml b/.github/workflows/linux_clang.yml deleted file mode 100644 index e03dfcc1a..000000000 --- a/.github/workflows/linux_clang.yml +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright (c) 2020 Mohammad Ashar Khan -# Distributed under Boost Software License, Version 1.0 -# (See accompanying file LICENSE_1_0.txt or copy at http://boost.org/LICENSE_1_0.txt) - -name: Linux Clang Release -# Trigger on Push to the repository, regardless of the branch. -# For fine tune, You can add specific branches or tags. 
-on: - push: - paths-ignore: - - '**.md' - - 'doc/**' - pull_request: - paths-ignore: - - '**.md' - - 'doc/**' - -jobs: - build: - name: "${{matrix.config.cxx}} -std=c++${{matrix.config.cxxstd}}" - runs-on: ubuntu-20.04 - strategy: - fail-fast: false - # If any compiler fails to compile, continue CI for next compiler in matrix instead of failing early - matrix: - config: - - { cc: clang-6.0, cxx: clang++-6.0, cxxstd: 11} - - { cc: clang-8, cxx: clang++-8, cxxstd: 11} - - { cc: clang-9, cxx: clang++-9, cxxstd: 11} - - { cc: clang-10, cxx: clang++-10, cxxstd: 11} - - { cc: clang-6.0, cxx: clang++-6.0, cxxstd: 17} - - { cc: clang-8, cxx: clang++-8, cxxstd: 17} - - { cc: clang-9, cxx: clang++-9, cxxstd: 17} - - { cc: clang-10, cxx: clang++-10, cxxstd: 17} - - { cc: clang-10, cxx: clang++-10, cxxstd: 2a} - - steps: - - uses: actions/checkout@v2 - - - name: Install Clang 10 - if: matrix.config.cxx == 'clang++-10' - run: | - sudo apt-get update - sudo apt-get install -y clang-10 - - - name: Install Clang 6 - if: matrix.config.cxx == 'clang++-6.0' - run: | - sudo apt-get update - sudo apt-get install -y clang-6.0 - - - name: Prepare BOOST_ROOT - run: | - cd ${GITHUB_WORKSPACE} - cd .. - - git clone -b master --depth 1 https://github.com/boostorg/boost.git boost-root - cd boost-root - - git submodule update --init --depth=1 --jobs 8 tools/build - git submodule update --init --depth=1 --jobs 8 libs/config - git submodule update --init --depth=1 --jobs 8 tools/boostdep - - mkdir -p libs/numeric/ - cp -rp ${GITHUB_WORKSPACE}/. libs/numeric/ublas - python tools/boostdep/depinst/depinst.py -g " --depth=1" -I benchmarks numeric/ublas - - ./bootstrap.sh - ./b2 -j 8 headers - - #echo ::set-env name=BOOST_ROOT::${PWD} - echo "BOOST_ROOT=${PWD}" >> ${GITHUB_ENV} - - - name: Prepare Build - run: | - echo $BOOST_ROOT - cd $BOOST_ROOT - - echo "using clang : : ${{ matrix.config.cxx }} ;" >> ~/user-config.jam; - - - name: Test Benchmarks - run: | - cd $BOOST_ROOT - cd libs/numeric/ublas - $BOOST_ROOT/b2 -j 4 benchmarks toolset=clang cxxstd=${{matrix.config.cxxstd}} cxxflags="-O3" - - - name: Test Tensor Examples - run: | - cd $BOOST_ROOT - cd libs/numeric/ublas - $BOOST_ROOT/b2 -j 4 examples/tensor toolset=clang cxxstd=${{matrix.config.cxxstd}} cxxflags="-O3" - - - name: Test Tensor - run: | - cd $BOOST_ROOT - cd libs/numeric/ublas - $BOOST_ROOT/b2 -j 4 test/tensor toolset=clang cxxstd=${{matrix.config.cxxstd}} cxxflags="-O3" diff --git a/.github/workflows/linux_gcc.yml b/.github/workflows/linux_gcc.yml deleted file mode 100644 index 3fa47504e..000000000 --- a/.github/workflows/linux_gcc.yml +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright (c) 2020 Mohammad Ashar Khan, Cem Bassoy -# Distributed under Boost Software License, Version 1.0 -# (See accompanying file LICENSE_1_0.txt or copy at http://boost.org/LICENSE_1_0.txt) - -name: Linux GCC Debug -# Trigger on Push to the repository, regardless of the branch. -# For fine tune, You can add specific branches or tags. 
-on: - push: - paths-ignore: - - '**.md' - - 'doc/**' - pull_request: - paths-ignore: - - '**.md' - - 'doc/**' -jobs: - build: - name: "${{matrix.config.cxx}} -std=c++${{matrix.config.cxxstd}}" - runs-on: ubuntu-20.04 - strategy: - fail-fast: false - # If any compiler fails to compile, continue CI for next compiler in matrix instead of failing early - matrix: - config: - - {cc: gcc-7, cxx: g++-7, cxxstd: 11} - - {cc: gcc-8, cxx: g++-8, cxxstd: 11} - - {cc: gcc-9, cxx: g++-9, cxxstd: 11} - - {cc: gcc-10, cxx: g++-10, cxxstd: 11} - - {cc: gcc-7, cxx: g++-7, cxxstd: 17} - - {cc: gcc-8, cxx: g++-8, cxxstd: 17} - - {cc: gcc-9, cxx: g++-9, cxxstd: 17} - - {cc: gcc-10, cxx: g++-10, cxxstd: 17} - - {cc: gcc-10, cxx: g++-10, cxxstd: 2a} - steps: - - uses: actions/checkout@v2 - - - name: Install GCC-10 - if: matrix.config.cxx == 'g++-10' - run: | - sudo apt update - sudo apt-get install -y g++-10 - - - name: Prepare BOOST_ROOT - run: | - cd ${GITHUB_WORKSPACE} - cd .. - - git clone -b master --depth 1 https://github.com/boostorg/boost.git boost-root - cd boost-root - - git submodule update --init --depth=1 --jobs 8 tools/build - git submodule update --init --depth=1 --jobs 8 libs/config - git submodule update --init --depth=1 --jobs 8 tools/boostdep - - mkdir -p libs/numeric/ - cp -rp ${GITHUB_WORKSPACE}/. libs/numeric/ublas - python tools/boostdep/depinst/depinst.py -g " --depth=1" -I benchmarks numeric/ublas - - ./bootstrap.sh - ./b2 -j 8 headers - - #echo ::set-env name=BOOST_ROOT::${PWD} - echo "BOOST_ROOT=${PWD}" >> $GITHUB_ENV - - - name: Prepare Build - run: | - echo $BOOST_ROOT - cd $BOOST_ROOT - - echo "using gcc : : ${{ matrix.config.cxx }} ;" >> ~/user-config.jam; - - - name: Test Benchmarks - run: | - cd $BOOST_ROOT - cd libs/numeric/ublas - $BOOST_ROOT/b2 -j 4 benchmarks toolset=gcc cxxstd=${{matrix.config.cxxstd}} - - - name: Test Tensor Examples - run: | - cd $BOOST_ROOT - cd libs/numeric/ublas - $BOOST_ROOT/b2 -j 4 examples/tensor toolset=gcc cxxstd=${{matrix.config.cxxstd}} cxxflags="-O0" - - - name: Test Tensor - run: | - cd $BOOST_ROOT - cd libs/numeric/ublas - $BOOST_ROOT/b2 -j 4 test/tensor toolset=gcc cxxstd=${{matrix.config.cxxstd}} cxxflags="-O0 -g --coverage" linkflags="--coverage" - - - name: Report Code coverage - if: matrix.config.cxxstd == '17' && matrix.config.cxx == 'g++-9' - run: | - ${GITHUB_WORKSPACE}/.ci/report_coverage.sh - - curl -s https://codecov.io/bash > cov.sh - chmod +x cov.sh - ./cov.sh -f coverage.info || echo "Codecov did not collect coverage reports" - diff --git a/.github/workflows/sanitizer.yml b/.github/workflows/sanitizer.yml new file mode 100644 index 000000000..a2edff34b --- /dev/null +++ b/.github/workflows/sanitizer.yml @@ -0,0 +1,83 @@ +# Copyright (c) 2020 Mohammad Ashar Khan +# Copyright (c) 2021 Cem Bassoy +# Distributed under Boost Software License, Version 1.0 +# (See accompanying file LICENSE_1_0.txt or copy at http://boost.org/LICENSE_1_0.txt) + +name: "Clang Sanitizer" + +on: + push: + paths-ignore: + - '**.md' + - 'doc/**' + pull_request: + paths-ignore: + - '**.md' + - 'doc/**' +jobs: + build: + name: "${{matrix.config.name}} with ${{matrix.config.description}} sanitizer with std=${{matrix.config.cxx}} and variant=${{matrix.config.variant}}" + runs-on: ubuntu-20.04 + strategy: + fail-fast: false + # If any compiler fails to compile, continue CI for next compiler in matrix instead of failing early + matrix: + config: + - { name: clang, description: address and leak, cc: clang-11, cxx: clang++-11, cxxstd: 20, variant: debug, opt: off , 
cxxflags: -fsanitize=address -fno-omit-frame-pointer, ldflags: -fsanitize=address } + - { name: clang, description: undefined behavior, cc: clang-11, cxx: clang++-11, cxxstd: 20, variant: debug, opt: off , cxxflags: -fsanitize=undefined, ldflags: -fsanitize=undefined } + - { name: clang, description: thread, cc: clang-11, cxx: clang++-11, cxxstd: 20, variant: debug, opt: off , cxxflags: -fsanitize=thread, ldflags: -fsanitize=thread } + - { name: gcc, description: address and leak, cc: gcc-10, cxx: g++-10, cxxstd: 20, variant: release, opt: speed, cxxflags: -fopenmp -fsanitize=address -fno-omit-frame-pointer, ldflags: -fsanitize=address -fopenmp} +# - { name: gcc, description: undefined behavior, cc: gcc-10, cxx: g++-10, cxxstd: 20, variant: release, opt: speed, cxxflags: -fopenmp -fsanitize=undefined, ldflags: -fsanitize=undefined -fopenmp} +# - { name: gcc, description: thread, cc: gcc-10, cxx: g++-10, cxxstd: 20, variant: release, opt: speed, cxxflags: -fopenmp -fsanitize=thread, ldflags: -fsanitize=thread -fopenmp} + + steps: + - uses: actions/checkout@v2 + + - name: Install Clang 11 + run: sudo apt-get update && sudo apt-get install -y clang-11 clang-tools-11 + + - name: Install GCC 10 + run: sudo apt-get update && sudo apt-get install -y g++-10 libomp-dev + + - name: Git Clone Boost.uBlas + run: | + cd ${GITHUB_WORKSPACE} + cd .. + + git clone -b master --depth 1 https://github.com/boostorg/boost.git boost-root + cd boost-root + echo "BOOST_ROOT=${PWD}" >> ${GITHUB_ENV} + echo $BOOST_ROOT + + git submodule update --init --depth=1 --jobs 8 tools/build + git submodule update --init --depth=1 --jobs 8 libs/config + git submodule update --init --depth=1 --jobs 8 tools/boostdep + + mkdir -p libs/numeric/ + cp -rp ${GITHUB_WORKSPACE}/. libs/numeric/ublas + python tools/boostdep/depinst/depinst.py -g " --depth=1" -I benchmarks numeric/ublas + + - name: Bootstrap Boost and Compile Boost + run: | + cd $BOOST_ROOT + ./bootstrap.sh + ./b2 -j8 headers + echo "using ${{ matrix.config.name }} : : ${{ matrix.config.cxx }} ;" >> ~/user-config.jam; + export ASAN_OPTIONS=symbolize=1 + export ASAN_OPTIONS=detect_leaks=1 + + - name: Run Tensor Examples with "${{matrix.config.description}}" sanitizer + run: | + cd $BOOST_ROOT/libs/numeric/ublas + ASAN_OPTIONS=detect_leaks=1 + ASAN_OPTIONS=symbolize=1 + $BOOST_ROOT/b2 -j8 examples/tensor toolset=${{matrix.config.name}} cxxstd=${{matrix.config.cxxstd}} variant=${{matrix.config.variant}} optimization=${{matrix.config.opt}} cxxflags="${{matrix.config.cxxflags}}" linkflags="${{matrix.config.ldflags}}" + + - name: Run Tensor Tests with "${{matrix.config.description}}" sanitizer + run: | + cd $BOOST_ROOT/libs/numeric/ublas + ASAN_OPTIONS=detect_leaks=1 + ASAN_OPTIONS=symbolize=1 + $BOOST_ROOT/b2 -j8 test/tensor toolset=${{matrix.config.name}} cxxstd=${{matrix.config.cxxstd}} variant=${{matrix.config.variant}} optimization=${{matrix.config.opt}} cxxflags="${{matrix.config.cxxflags}}" linkflags="${{matrix.config.ldflags}}" + + diff --git a/.github/workflows/thread_san.yml b/.github/workflows/thread_san.yml deleted file mode 100644 index 4fc416a0d..000000000 --- a/.github/workflows/thread_san.yml +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright (c) 2020 Mohammad Ashar Khan -# Distributed under Boost Software License, Version 1.0 -# (See accompanying file LICENSE_1_0.txt or copy at http://boost.org/LICENSE_1_0.txt) - -name: Thread Sanitizer -# Trigger on Push to the repository, regardless of the branch. -# For fine tune, You can add specific branches or tags. 
-on: - push: - paths-ignore: - - '**.md' - - 'doc/**' - pull_request: - paths-ignore: - - '**.md' - - 'doc/**' -jobs: - build: - name: "${{matrix.config.cxx}} -std=c++${{matrix.config.cxxstd}}" - runs-on: ubuntu-20.04 - strategy: - fail-fast: false - # If any compiler fails to compile, continue CI for next compiler in matrix instead of failing early - matrix: - config: - - {cc: gcc-9, cxx: g++-9, cxxstd: 2a, name: gcc} - - {cc: clang-9, cxx: clang++-9, cxxstd: 2a, name: clang} - steps: - - uses: actions/checkout@v2 - - - name: Prepare BOOST_ROOT - run: | - cd ${GITHUB_WORKSPACE} - cd .. - - git clone -b master --depth 1 https://github.com/boostorg/boost.git boost-root - cd boost-root - - git submodule update --init --depth=1 --jobs 8 tools/build - git submodule update --init --depth=1 --jobs 8 libs/config - git submodule update --init --depth=1 --jobs 8 tools/boostdep - - mkdir -p libs/numeric/ - cp -rp ${GITHUB_WORKSPACE}/. libs/numeric/ublas - python tools/boostdep/depinst/depinst.py -g " --depth=1" -I benchmarks numeric/ublas - - ./bootstrap.sh - ./b2 -j 8 headers - - #echo ::set-env name=BOOST_ROOT::${PWD} - echo "BOOST_ROOT=${PWD}" >> ${GITHUB_ENV} - - - name: Prepare Build - run: | - echo $BOOST_ROOT - cd $BOOST_ROOT - - echo "using ${{matrix.config.name}} : : ${{ matrix.config.cxx }} ;" >> ~/user-config.jam; - - - name: Test Benchmarks - run: | - cd $BOOST_ROOT - cd libs/numeric/ublas - $BOOST_ROOT/b2 -j 4 benchmarks toolset=${{matrix.config.name}} cxxstd=${{matrix.config.cxxstd}} - - - name: Test Tensor Examples - run: | - cd $BOOST_ROOT - cd libs/numeric/ublas - $BOOST_ROOT/b2 -j 4 examples/tensor toolset=${{matrix.config.name}} cxxstd=${{matrix.config.cxxstd}} cxxflags="-g -fsanitize=thread -O2" linkflags="-fsanitize=thread" - - - name: Test Tensor - run: | - cd $BOOST_ROOT - cd libs/numeric/ublas - $BOOST_ROOT/b2 -j 4 test/tensor toolset=${{matrix.config.name}} cxxstd=${{matrix.config.cxxstd}} cxxflags="-g -fsanitize=thread -O2" linkflags="-fsanitize=thread" diff --git a/.github/workflows/ub_san.yml b/.github/workflows/ub_san.yml deleted file mode 100644 index 1985bf458..000000000 --- a/.github/workflows/ub_san.yml +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright (c) 2020 Mohammad Ashar Khan -# Distributed under Boost Software License, Version 1.0 -# (See accompanying file LICENSE_1_0.txt or copy at http://boost.org/LICENSE_1_0.txt) - -name: Undefined Behaviour Sanitizer -# Trigger on Push to the repository, regardless of the branch. -# For fine tune, You can add specific branches or tags. -on: - push: - paths-ignore: - - '**.md' - - 'doc/**' - pull_request: - paths-ignore: - - '**.md' - - 'doc/**' -jobs: - build: - name: "${{matrix.config.cxx}} -std=c++${{matrix.config.cxxstd}}" - runs-on: ubuntu-20.04 - strategy: - fail-fast: false - # If any compiler fails to compile, continue CI for next compiler in matrix instead of failing early - matrix: - config: - - {cc: gcc-9, cxx: g++-9, cxxstd: 2a, name: gcc} - - {cc: clang-9, cxx: clang++-9, cxxstd: 2a, name: clang} - steps: - - uses: actions/checkout@v2 - - - name: Prepare BOOST_ROOT - run: | - cd ${GITHUB_WORKSPACE} - cd .. - - git clone -b master --depth 1 https://github.com/boostorg/boost.git boost-root - cd boost-root - - git submodule update --init --depth=1 --jobs 8 tools/build - git submodule update --init --depth=1 --jobs 8 libs/config - git submodule update --init --depth=1 --jobs 8 tools/boostdep - - mkdir -p libs/numeric/ - cp -rp ${GITHUB_WORKSPACE}/. 
libs/numeric/ublas - python tools/boostdep/depinst/depinst.py -g " --depth=1" -I benchmarks numeric/ublas - - ./bootstrap.sh - ./b2 -j 8 headers - - #echo ::set-env name=BOOST_ROOT::${PWD} - echo "BOOST_ROOT=${PWD}" >> ${GITHUB_ENV} - - - name: Prepare Build - run: | - echo $BOOST_ROOT - cd $BOOST_ROOT - - echo "using ${{matrix.config.name}} : : ${{ matrix.config.cxx }} ;" >> ~/user-config.jam; - - - name: Test Benchmarks - run: | - cd $BOOST_ROOT - cd libs/numeric/ublas - $BOOST_ROOT/b2 -j 4 benchmarks toolset=${{matrix.config.name}} cxxstd=${{matrix.config.cxxstd}} - - - name: Test Tensor Examples - run: | - cd $BOOST_ROOT - cd libs/numeric/ublas - $BOOST_ROOT/b2 -j 4 examples/tensor toolset=${{matrix.config.name}} cxxstd=${{matrix.config.cxxstd}} cxxflags="-g -fsanitize=undefined" linkflags="-fsanitize=undefined" - - - name: Test Tensor - run: | - cd $BOOST_ROOT - cd libs/numeric/ublas - $BOOST_ROOT/b2 -j 4 test/tensor toolset=${{matrix.config.name}} cxxstd=${{matrix.config.cxxstd}} cxxflags="-g -fsanitize=undefined" linkflags="-fsanitize=undefined" diff --git a/.github/workflows/windows_msvc.yml b/.github/workflows/windows.yml similarity index 77% rename from .github/workflows/windows_msvc.yml rename to .github/workflows/windows.yml index 59e6ff05d..7bdfc69be 100644 --- a/.github/workflows/windows_msvc.yml +++ b/.github/workflows/windows.yml @@ -1,8 +1,9 @@ # Copyright (c) 2020 Mohammad Ashar Khan +# Copyright (c) 2021 Cem Bassoy # Distributed under Boost Software License, Version 1.0 # (See accompanying file LICENSE_1_0.txt or copy at http://boost.org/LICENSE_1_0.txt) -name: "Windows MSVC" +name: "Windows" on: push: paths-ignore: @@ -14,15 +15,15 @@ on: - 'doc/**' jobs: build: - name: "windows=${{matrix.config.os}} msvc=${{matrix.config.version}} std=c++${{matrix.config.cxxstd}}" + name: "Windows=${{matrix.config.os}} msvc=${{matrix.config.version}} std=c++${{matrix.config.cxxstd}}" runs-on: ${{matrix.config.os}} strategy: - fail-fast: false + fail-fast: true matrix: config: - - {os: windows-2016, toolset: msvc, version: 14.16, cxxstd: 11} - - {os: windows-2019, toolset: msvc, version: 14.28, cxxstd: 11} - - {os: windows-2019, toolset: msvc, version: 14.28, cxxstd: 17} +# - {os: windows-2016, toolset: msvc, version: 14.16, cxxstd: 11} +# - {os: windows-2019, toolset: msvc, version: 14.28, cxxstd: 11} +# - {os: windows-2019, toolset: msvc, version: 14.28, cxxstd: 17} - {os: windows-2019, toolset: msvc, version: 14.28, cxxstd: latest} steps: @@ -70,24 +71,24 @@ jobs: cmd /c bootstrap b2 -j8 headers - - name: Test Benchmarks - shell: cmd - run: | - cd %BOOST_ROOT% - cd libs\numeric\ublas - %BOOST_ROOT%\b2 -j 4 benchmarks toolset=%TOOLSET% cxxstd=${{matrix.config.cxxstd}} address-model=64 +# - name: Test Benchmarks +# shell: cmd +# run: | +# cd %BOOST_ROOT% +# cd libs\numeric\ublas +# %BOOST_ROOT%\b2 -j 4 benchmarks toolset=%TOOLSET% cxxstd=${{matrix.config.cxxstd}} address-model=64 - name: Test Tensor Examples shell: cmd run: | cd %BOOST_ROOT% cd libs\numeric\ublas - %BOOST_ROOT%\b2 -j 4 examples/tensor toolset=%TOOLSET% cxxstd=${{matrix.config.cxxstd}} address-model=64 + %BOOST_ROOT%\b2 -j8 examples/tensor toolset=%TOOLSET% cxxstd=${{matrix.config.cxxstd}} address-model=64 - name: Test Tensor shell: cmd run: | cd %BOOST_ROOT% cd libs\numeric\ublas - %BOOST_ROOT%\b2 -j 4 test/tensor toolset=%TOOLSET% cxxstd=${{matrix.config.cxxstd}} address-model=64 + %BOOST_ROOT%\b2 -j8 test/tensor toolset=%TOOLSET% cxxstd=${{matrix.config.cxxstd}} address-model=64 diff --git a/.travis.yml b/.travis.yml 
deleted file mode 100644 index f4578d1b4..000000000 --- a/.travis.yml +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright 2018 Stefan Seefeld -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or copy at http://boost.org/LICENSE_1_0.txt) - - -language: cpp - -dist: bionic - -sudo: required - -branches: - only: - - master - - develop - - doc - - ci - -# env: specifies additional global variables to define per row in build matrix -env: - global: - - CLBLAS_PREFIX=${TRAVIS_BUILD_DIR}/CLBLAS/ - - PATH=${CLBLAS_PREFIX}/bin:$PATH - - LD_LIBRARY_PATH=${CLBLAS_PREFIX}/lib:$LD_LIBRARY_PATH - -matrix: - include: - - os: linux - env: TOOLSET=gcc COMPILER=g++-7 CXXSTD=11 - - os: linux - env: TOOLSET=gcc COMPILER=g++-7 CXXSTD=17 - - os: linux - env: TOOLSET=gcc COMPILER=g++-9 CXXSTD=2a - - os: linux - env: TOOLSET=clang COMPILER=clang++-7 CXXSTD=17 - - os: linux - env: TOOLSET=clang COMPILER=clang++-10 CXXSTD=2a - -addons: - apt: - sources: - - sourceline: 'deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-10 main' - key_url: 'https://apt.llvm.org/llvm-snapshot.gpg.key' - - sourceline: 'ppa:ubuntu-toolchain-r/test' - packages: - - g++-7 - - g++-9 - - clang-7 - - clang-10 - - libopenblas-base - - rpm2cpio - - cpio - - clinfo - - opencl-headers - - ocl-icd-opencl-dev - -before_install: - - if [ ${TRAVIS_OS_NAME} == "linux" ]; then .ci/install-ocl-ubuntu.sh; fi - - .ci/install-clblas.sh - - cmake --version; - - ${CC} --version; - - ${CXX} --version; - -install: - - cd .. - - git clone -b master --depth 1 https://github.com/boostorg/boost.git boost-root - - cd boost-root - - git submodule update --init --jobs 8 tools/build - - git submodule update --init --jobs 8 libs/config - - git submodule update --init --jobs 8 tools/boostdep - - mkdir -p libs/numeric/ - - cp -rp $TRAVIS_BUILD_DIR/. libs/numeric/ublas - - python tools/boostdep/depinst/depinst.py -I benchmarks numeric/ublas - - ./bootstrap.sh - - ./b2 -j 8 headers - - export BOOST_ROOT="`pwd`" - -# use script: to execute build steps -script: - - |- - echo "using $TOOLSET : : $COMPILER ;" >> ~/user-config.jam; - echo "using clblas : : ${CLBLAS_PREFIX}/include ${CLBLAS_PREFIX}/lib ;" >> ~/user-config.jam; - cp $TRAVIS_BUILD_DIR/opencl.jam ~/ - cp $TRAVIS_BUILD_DIR/clblas.jam ~/ - - cd libs/numeric/ublas - - $BOOST_ROOT/b2 -j 8 test toolset=$TOOLSET cxxstd=$CXXSTD - -notifications: - email: - on_success: always \ No newline at end of file diff --git a/IDEs/qtcreator/examples/configuration.pri b/IDEs/qtcreator/examples/configuration.pri index 0c0c8650f..dde3f8f8f 100644 --- a/IDEs/qtcreator/examples/configuration.pri +++ b/IDEs/qtcreator/examples/configuration.pri @@ -1,17 +1,24 @@ CONFIG -= qt CONFIG += depend_includepath win*: CONFIG += console +CONFIG += object_parallel_to_source -QMAKE_CXXFLAGS += -std=c++17 -fopenmp -g +QMAKE_CXXFLAGS =-std=c++20 +QMAKE_CXXFLAGS +=-Wall -Wpedantic -Wextra +QMAKE_CXXFLAGS +=-Wno-unknown-pragmas +QMAKE_CXXFLAGS +=-Wno-unused-but-set-variable -# If ublas tests are build with boost source code then, -# then boost headers and boost libraries should be used. +gcc:QMAKE_CXXFLAGS_RELEASE =-O3 -march=native -fopenmp +clang: QMAKE_CXXFLAGS_RELEASE =-O3 -march=native -fopenmp=libiomp5 -BOOST_ROOT=../../../../../.. 
+gcc:QMAKE_CXXFLAGS_DEBUG += -g +clang: QMAKE_CXXFLAGS_DEBUG +=-g -exists( $$BOOST_ROOT/boost-build.jam ) { - message("Boost installed.") - INCLUDEPATH += $${BOOST_ROOT}/../libs/numeric/ublas/include - LIBS += -L$${BOOST_ROOT}/../stage/lib -lgomp - QMAKE_RPATHDIR += $${BOOST_ROOT}/../stage/lib -} +BOOST_ROOT=../../../../../../../.. +QMAKE_RPATHDIR += $${BOOST_ROOT}/stage/lib +INCLUDEPATH+=$$BOOST_ROOT/libs/numeric/ublas/include +LIBS+=-L$${BOOST_ROOT}/stage/lib + +#message("INCLUDEPATH: $${INCLUDEPATH}") + +INCLUDE_DIR=$${BOOST_ROOT}/libs/numeric/ublas/include diff --git a/IDEs/qtcreator/examples/tensor/example_access_tensor.pro b/IDEs/qtcreator/examples/tensor/example_access_tensor.pro deleted file mode 100644 index c6d761159..000000000 --- a/IDEs/qtcreator/examples/tensor/example_access_tensor.pro +++ /dev/null @@ -1,7 +0,0 @@ -TEMPLATE = app -TARGET = access_tensor - -include (../configuration.pri) - -SOURCES += \ - ../../../../examples/tensor/access_tensor.cpp diff --git a/IDEs/qtcreator/examples/tensor/example_instantiate_tensor.pro b/IDEs/qtcreator/examples/tensor/example_instantiate_tensor.pro deleted file mode 100644 index e98706b38..000000000 --- a/IDEs/qtcreator/examples/tensor/example_instantiate_tensor.pro +++ /dev/null @@ -1,7 +0,0 @@ -TEMPLATE = app -TARGET = instantiate_tensor - -include (../configuration.pri) - -SOURCES += \ - ../../../../examples/tensor/instantiate_tensor.cpp diff --git a/IDEs/qtcreator/examples/tensor/example_multiply_tensors_einstein_notation.pro b/IDEs/qtcreator/examples/tensor/example_multiply_tensors_einstein_notation.pro deleted file mode 100644 index 1aca61e34..000000000 --- a/IDEs/qtcreator/examples/tensor/example_multiply_tensors_einstein_notation.pro +++ /dev/null @@ -1,7 +0,0 @@ -TEMPLATE = app -TARGET = multiply_tensors_einstein_notation - -include (../configuration.pri) - -SOURCES += \ - ../../../../examples/tensor/multiply_tensors_einstein_notation.cpp diff --git a/IDEs/qtcreator/examples/tensor/example_multiply_tensors_product_function.pro b/IDEs/qtcreator/examples/tensor/example_multiply_tensors_product_function.pro deleted file mode 100644 index bf02e0228..000000000 --- a/IDEs/qtcreator/examples/tensor/example_multiply_tensors_product_function.pro +++ /dev/null @@ -1,7 +0,0 @@ -TEMPLATE = app -TARGET = multiply_tensors_product_function - -include (../configuration.pri) - -SOURCES += \ - ../../../../examples/tensor/multiply_tensors_product_function.cpp diff --git a/IDEs/qtcreator/examples/tensor/example_simple_expressions.pro b/IDEs/qtcreator/examples/tensor/example_simple_expressions.pro deleted file mode 100644 index 1c65b9cc6..000000000 --- a/IDEs/qtcreator/examples/tensor/example_simple_expressions.pro +++ /dev/null @@ -1,7 +0,0 @@ -TEMPLATE = app -TARGET = simple_expressions - -include (../configuration.pri) - -SOURCES += \ - ../../../../examples/tensor/simple_expressions.cpp diff --git a/IDEs/qtcreator/examples/tensor/tensor.pro b/IDEs/qtcreator/examples/tensor/tensor.pro index feb833f62..f928b32bf 100644 --- a/IDEs/qtcreator/examples/tensor/tensor.pro +++ b/IDEs/qtcreator/examples/tensor/tensor.pro @@ -1,21 +1,10 @@ -#TEMPLATE = subdirs -#SUBDIRS = \ -# construction_access \ -# simple_expressions \ -# multiply_tensors_prod \ -# einstein_notation \ -# instantiate_tensor +TEMPLATE = subdirs +SUBDIRS = \ + simple_expressions \ + multiply_tensors_product_function \ + multiply_tensors_einstein_notation \ + instantiate_tensor \ + access_tensor -include ( example_instantiate_tensor.pro ) -include ( example_access_tensor.pro ) -include ( 
example_simple_expressions.pro ) -include ( example_multiply_tensors_product_function.pro ) -include ( example_multiply_tensors_einstein_notation.pro ) - -#instantiate_tensor.file = example_instantiate_tensor.pro -#construction_access.file = example_construction_access.pro -#simple_expressions.file = example_simple_expressions.pro -#multiply_tensors_prod.file = example_multiply_tensors_prod.pro -#einstein_notation.file = example_einstein_notation.pro diff --git a/IDEs/qtcreator/include/include.pro b/IDEs/qtcreator/include/include.pro index 17d199106..a5aeead8b 100644 --- a/IDEs/qtcreator/include/include.pro +++ b/IDEs/qtcreator/include/include.pro @@ -1,11 +1,9 @@ TEMPLATE = lib TARGET = ublas -CONFIG += \ - staticlib \ - depend_includepath +CONFIG += staticlib depend_includepath CONFIG -= qt -CONFIG += c++17 +CONFIG += c++20 INCLUDE_DIR=../../../include include(detail/detail.pri) diff --git a/IDEs/qtcreator/include/tensor/tensor.pri b/IDEs/qtcreator/include/tensor/tensor.pri index 4cfae5d9f..112376c11 100644 --- a/IDEs/qtcreator/include/tensor/tensor.pri +++ b/IDEs/qtcreator/include/tensor/tensor.pri @@ -1,14 +1,11 @@ HEADERS += \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/algorithms.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/dynamic_extents.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/dynamic_strides.hpp \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/expression.hpp \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/expression_evaluation.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/extents_functions.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/fixed_rank_extents.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/fixed_rank_strides.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/extents.hpp \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/functions.hpp \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/index.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/index_functions.hpp \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/layout.hpp \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/multi_index.hpp \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/multi_index_utility.hpp \ @@ -16,24 +13,43 @@ HEADERS += \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/operators_arithmetic.hpp \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/operators_comparison.hpp \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/ostream.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/static_extents.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/static_strides.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/strides.hpp \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/tags.hpp \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/tensor.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/tensor_core.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/tensor_engine.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/concepts.hpp \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/type_traits.hpp + +HEADERS += \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/traits/basic_type_traits.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/traits/storage_traits.hpp + +HEADERS += \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/extents/extents_dynamic_size.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/extents/extents_static_size.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/extents/extents_static_functions.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/extents/extents_static.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/extents/extents_base.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/extents/extents_functions.hpp + + +HEADERS += \ + 
$${INCLUDE_DIR}/boost/numeric/ublas/tensor/tensor/tensor_core.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/tensor/tensor_engine.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/tensor/tensor_static.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp + + HEADERS += \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/detail/basic_type_traits.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/detail/storage_traits.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/detail/type_traits_dynamic_extents.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/detail/type_traits_dynamic_strides.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/detail/type_traits_extents.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/detail/type_traits_fixed_rank_extents.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/detail/type_traits_fixed_rank_strides.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/detail/type_traits_static_extents.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/detail/type_traits_static_strides.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/detail/type_traits_strides.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/detail/type_traits_tensor.hpp + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/function/inner_prod.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/function/init.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/function/outer_prod.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/function/trans.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/function/norm.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/function/imag.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/function/real.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/function/conj.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/function/tensor_times_vector.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/function/tensor_times_matrix.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/function/tensor_times_tensor.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/function/reshape.hpp diff --git a/IDEs/qtcreator/test/test_tensor.pro b/IDEs/qtcreator/test/test_tensor.pro index 5966e27d6..8deee2f99 100644 --- a/IDEs/qtcreator/test/test_tensor.pro +++ b/IDEs/qtcreator/test/test_tensor.pro @@ -1,38 +1,46 @@ TEMPLATE = app -TARGET = test +TARGET = test_tensor +CONFIG += staticlib depend_includepath console CONFIG -= qt -CONFIG += depend_includepath debug -win*: CONFIG += console +CONFIG += c++20 #QMAKE_CXXFLAGS += -fno-inline -QMAKE_CXXFLAGS += -std=c++17 -#QMAKE_CXXFLAGS += -Wno-unknown-pragmas -#QMAKE_CXXFLAGS += --coverage +QMAKE_CXXFLAGS =-std=c++20 +QMAKE_CXXFLAGS +=-Wall -Wpedantic -Wextra +QMAKE_CXXFLAGS +=-Wno-unknown-pragmas +QMAKE_CXXFLAGS +=-Wno-unused-but-set-variable + + +gcc:QMAKE_CXXFLAGS_RELEASE =-O3 -march=native -fopenmp +clang: QMAKE_CXXFLAGS_RELEASE =-O3 -march=native -fopenmp=libiomp5 +gcc:QMAKE_CXXFLAGS_DEBUG = -g +clang: QMAKE_CXXFLAGS_DEBUG =-g -DEFINES += BOOST_UBLAS_NO_EXCEPTIONS -win*: DEFINES += _SCL_SECURE_NO_WARNINGS -#Visual age IBM -xlc: DEFINES += BOOST_UBLAS_NO_ELEMENT_PROXIES +#QMAKE_CXXFLAGS += --coverage + +BOOST_ROOT=../../../../../.. -# If ublas tests are build with boost source code then, -# then boost headers and boost libraries should be used. -#exists(../../../../../../boost-build.jam) { -# INCLUDEPATH += ../../../../../.. 
-# LIBS += -L../../../../../../stage/lib -# QMAKE_RPATHDIR += ../../../../../../stage/lib +#exists( $$BOOST_ROOT/boost-build.jam ) { +# message("Boost installed.") +# INCLUDEPATH += $${BOOST_ROOT}/libs/numeric/ublas/include +# LIBS += -L$${BOOST_ROOT}/stage/lib -lgomp +# QMAKE_RPATHDIR += $${BOOST_ROOT}/stage/lib #} -INCLUDEPATH += /usr/local/include -INCLUDEPATH += ../../../include -LIBS += -L/usr/local/lib -LIBS +=-lboost_unit_test_framework -# -lgcov +QMAKE_RPATHDIR += $${BOOST_ROOT}/stage/lib +INCLUDEPATH+=$$BOOST_ROOT/libs/numeric/ublas/include +LIBS+=-L$${BOOST_ROOT}/stage/lib -lboost_unit_test_framework -lgomp +#message("INCLUDEPATH: $${INCLUDEPATH}") + +INCLUDE_DIR=$${BOOST_ROOT}/libs/numeric/ublas/include TEST_DIR = ../../../test/tensor +include(../include/tensor/tensor.pri) + HEADERS += \ $${TEST_DIR}/utility.hpp @@ -41,7 +49,8 @@ SOURCES += \ $${TEST_DIR}/test_einstein_notation.cpp \ $${TEST_DIR}/test_expression.cpp \ $${TEST_DIR}/test_expression_evaluation.cpp \ - $${TEST_DIR}/test_extents.cpp \ + $${TEST_DIR}/test_extents_dynamic.cpp \ + $${TEST_DIR}/test_extents_dynamic_rank_static.cpp \ $${TEST_DIR}/test_fixed_rank_expression_evaluation.cpp \ $${TEST_DIR}/test_fixed_rank_extents.cpp \ $${TEST_DIR}/test_fixed_rank_functions.cpp \ @@ -66,8 +75,5 @@ SOURCES += \ $${TEST_DIR}/test_static_tensor_matrix_vector.cpp \ $${TEST_DIR}/test_strides.cpp \ $${TEST_DIR}/test_tensor.cpp \ - $${TEST_DIR}/test_tensor_matrix_vector.cpp - - -INCLUDEPATH += \ - ../../../include + $${TEST_DIR}/test_tensor_matrix_vector.cpp \ + $${TEST_DIR}/test_extents_functions.cpp diff --git a/IDEs/qtcreator/tests.pri b/IDEs/qtcreator/tests.pri index 7b55d478c..04e131f59 100644 --- a/IDEs/qtcreator/tests.pri +++ b/IDEs/qtcreator/tests.pri @@ -1,72 +1,72 @@ SUBDIRS += \ - begin_end \ - comp_mat_erase \ - concepts \ - num_columns \ - num_rows \ - placement_new \ - size \ - sparse_view_test \ - test1 \ - test2 \ - test3 \ - test3_coo \ - test3_mvov \ - test4 \ - test5 \ - test6 \ - test7 \ - test_assignment \ - test_banded_storage_layout \ - test_complex_norms \ - test_coordinate_matrix_inplace_merge \ - test_coordinate_matrix_sort \ - test_coordinate_matrix_always_do_full_sort \ - test_coordinate_vector_inplace_merge \ - test_fixed_containers \ - test_inplace_solve_basic \ - test_inplace_solve_sparse \ - test_inplace_solve_mvov \ - test_lu \ - test_matrix_vector \ - test_ticket7296 \ - test_triangular \ - triangular_access \ - triangular_layout \ - test_tensor +# begin_end \ +# comp_mat_erase \ +# concepts \ +# num_columns \ +# num_rows \ +# placement_new \ +# size \ +# sparse_view_test \ +# test1 \ +# test2 \ +# test3 \ +# test3_coo \ +# test3_mvov \ +# test4 \ +# test5 \ +# test6 \ +# test7 \ +# test_assignment \ +# test_banded_storage_layout \ +# test_complex_norms \ +# test_coordinate_matrix_inplace_merge \ +# test_coordinate_matrix_sort \ +# test_coordinate_matrix_always_do_full_sort \ +# test_coordinate_vector_inplace_merge \ +# test_fixed_containers \ +# test_inplace_solve_basic \ +# test_inplace_solve_sparse \ +# test_inplace_solve_mvov \ +# test_lu \ +# test_matrix_vector \ +# test_ticket7296 \ +# test_triangular \ +# triangular_access \ +# triangular_layout \ + # test_tensor -begin_end.file = test/begin_end.pro -comp_mat_erase.file = test/comp_mat_erase.pro -concepts.file = test/concepts.pro -num_columns.file = test/num_columns.pro -num_rows.file = test/num_rows.pro -placement_new.file = test/placement_new.pro -size.file = test/size.pro -sparse_view_test.file = test/sparse_view_test.pro -test1.file = 
test/test1.pro -test2.file = test/test2.pro -test3.file = test/test3.pro -test3_coo.file = test/test3_coo.pro -test3_mvov.file = test/test3_mvov.pro -test4.file = test/test4.pro -test5.file = test/test5.pro -test6.file = test/test6.pro -test7.file = test/test7.pro -test_assignment.file = test/test_assignment.pro -test_banded_storage_layout.file = test/test_banded_storage_layout.pro -test_complex_norms.file = test/test_complex_norms.pro -test_coordinate_matrix_inplace_merge.file = test/test_coordinate_matrix_inplace_merge.pro -test_coordinate_matrix_sort.file = test/test_coordinate_matrix_sort.pro -test_coordinate_matrix_always_do_full_sort.file = test/test_coordinate_matrix_always_do_full_sort.pro -test_coordinate_vector_inplace_merge.file = test/test_coordinate_vector_inplace_merge.pro -test_fixed_containers.file = test/test_fixed_containers.pro -test_inplace_solve_basic.file = test/test_inplace_solve_basic.pro -test_inplace_solve_sparse.file = test/test_inplace_solve_sparse.pro -test_inplace_solve_mvov.file = test/test_inplace_solve_mvov.pro -test_lu.file = test/test_lu.pro -test_matrix_vector.file = test/test_matrix_vector.pro -test_ticket7296.file = test/test_ticket7296.pro -test_triangular.file = test/test_triangular.pro -triangular_access.file = test/triangular_access.pro -triangular_layout.file = test/triangular_layout.pro -test_tensor.file = test/test_tensor.pro +#begin_end.file = test/begin_end.pro +#comp_mat_erase.file = test/comp_mat_erase.pro +#concepts.file = test/concepts.pro +#num_columns.file = test/num_columns.pro +#num_rows.file = test/num_rows.pro +#placement_new.file = test/placement_new.pro +#size.file = test/size.pro +#sparse_view_test.file = test/sparse_view_test.pro +#test1.file = test/test1.pro +#test2.file = test/test2.pro +#test3.file = test/test3.pro +#test3_coo.file = test/test3_coo.pro +#test3_mvov.file = test/test3_mvov.pro +#test4.file = test/test4.pro +#test5.file = test/test5.pro +#test6.file = test/test6.pro +#test7.file = test/test7.pro +#test_assignment.file = test/test_assignment.pro +#test_banded_storage_layout.file = test/test_banded_storage_layout.pro +#test_complex_norms.file = test/test_complex_norms.pro +#test_coordinate_matrix_inplace_merge.file = test/test_coordinate_matrix_inplace_merge.pro +#test_coordinate_matrix_sort.file = test/test_coordinate_matrix_sort.pro +#test_coordinate_matrix_always_do_full_sort.file = test/test_coordinate_matrix_always_do_full_sort.pro +#test_coordinate_vector_inplace_merge.file = test/test_coordinate_vector_inplace_merge.pro +#test_fixed_containers.file = test/test_fixed_containers.pro +#test_inplace_solve_basic.file = test/test_inplace_solve_basic.pro +#test_inplace_solve_sparse.file = test/test_inplace_solve_sparse.pro +#test_inplace_solve_mvov.file = test/test_inplace_solve_mvov.pro +#test_lu.file = test/test_lu.pro +#test_matrix_vector.file = test/test_matrix_vector.pro +#test_ticket7296.file = test/test_ticket7296.pro +#test_triangular.file = test/test_triangular.pro +#triangular_access.file = test/triangular_access.pro +#triangular_layout.file = test/triangular_layout.pro +#test_tensor.file = test/test_tensor.pro diff --git a/IDEs/qtcreator/ublas_develop.pro b/IDEs/qtcreator/ublas_develop.pro index e509e747e..49fc2d99c 100644 --- a/IDEs/qtcreator/ublas_develop.pro +++ b/IDEs/qtcreator/ublas_develop.pro @@ -4,7 +4,9 @@ SUBDIRS = include # examples # benchmarks OTHER_FILES += ../../changelog.txt -include (tests.pri) +#include (tests.pri) + + diff --git a/README.md b/README.md index 22ae6db18..1e9ec9223 100644 
--- a/README.md +++ b/README.md @@ -8,64 +8,53 @@ Boost Linear and Multilinear Algebra Library [![Mailing List](https://img.shields.io/badge/ublas-mailing%20list-4eb899.svg)](https://lists.boost.org/mailman/listinfo.cgi/ublas) [![Gitter](https://img.shields.io/badge/ublas-chat%20on%20gitter-4eb899.svg)](https://gitter.im/boostorg/ublas) -**Boost.uBLAS** is part of the [Boost C++ Libraries](http://github.com/boostorg). It is directed towards scientific computing on the level of basic linear and multilinear algebra operations with tensors, matrices and vectors. +[![Windows](https://github.com/boostorg/ublas/actions/workflows/windows.yml/badge.svg)](https://github.com/boostorg/ublas/actions/workflows/windows.yml) +[![Linux](https://github.com/boostorg/ublas/actions/workflows/linux.yml/badge.svg)](https://github.com/boostorg/ublas/actions/workflows/linux.yml) +[![Apple MacOS](https://github.com/boostorg/ublas/actions/workflows/apple.yml/badge.svg)](https://github.com/boostorg/ublas/actions/workflows/apple.yml) +[![Clang Sanitizer](https://github.com/boostorg/ublas/actions/workflows/sanitizer.yml/badge.svg)](https://github.com/boostorg/ublas/actions/workflows/sanitizer.yml) +[![Clang Tidy](https://github.com/boostorg/ublas/actions/workflows/clangtidy.yml/badge.svg)](https://github.com/boostorg/ublas/actions/workflows/clangtidy.yml) +[![Codecov](https://codecov.io/gh/boostorg/ublas/branch/master/graph/badge.svg)](https://codecov.io/gh/boostorg/ublas/branch/master) +**Boost.uBLAS** is part of the [Boost C++ Libraries](http://github.com/boostorg). +It is directed towards scientific computing on the level of basic linear and multilinear algebra operations with tensors, matrices and vectors. + ## Documentation uBLAS is documented at [boost.org](https://www.boost.org/doc/libs/1_69_0/libs/numeric/ublas/doc/index.html). -The tensor extension has also a [wiki page](https://github.com/BoostGSoC18/tensor/wiki). +The tensor extension has also a [wiki page](https://github.com/boostorg/ublas/wiki/Tensor). ## License Distributed under the [Boost Software License, Version 1.0](http://www.boost.org/LICENSE_1_0.txt). 
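The refactor replaces the former `dynamic_tensor`, `fixed_rank_tensor` and `static_tensor` templates with the class templates `tensor_dynamic`, `tensor_static_rank` and `tensor_static` (see the new `tensor/tensor/*.hpp` headers listed in the updated tensor.pri above). A minimal declaration sketch, reconstructed from the old and new example code in this patch; the umbrella include and the element-type, rank and layout arguments are assumptions, since the patch text does not preserve include lines or template argument lists:

```cpp
#include <boost/numeric/ublas/tensor.hpp>   // assumed umbrella header

namespace ublas = boost::numeric::ublas;

// Assumed parameters: float elements, column-major (first_order) layout,
// mirroring the ordering of the old dynamic_tensor/fixed_rank_tensor/static_tensor templates.
using value  = float;
using layout = ublas::layout::first_order;

using tdyn   = ublas::tensor_dynamic<value, layout>;                          // dynamic rank, dynamic extents
using trank3 = ublas::tensor_static_rank<value, 3, layout>;                   // static rank (3), dynamic extents
using tstat  = ublas::tensor_static<value, ublas::extents<3, 4, 2>, layout>;  // static rank and static extents

int main()
{
  auto a = tdyn{3, 4, 2};   // construction from an extents list, as in simple_expressions.cpp
  return a.size() == 24u ? 0 : 1;
}
```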
## Properties -* Header-only -* Tensor extension requires C++17 compatible compiler +* header-only +* requires C++20 compatible compiler + * gcc version >= 10.x.x + * clang version >= 10.x.x + * msvc version >= 14.28 * Unit-tests require Boost.Test -## Build Status - - -#### Tensor Build & Test - -| Operating System | Compiler | [`master`](https://github.com/boostorg/ublas/tree/master) | [`develop`](https://github.com/boostorg/ublas/tree/develop) | -| :-------------------------: | :-----------------: | :----------------------------------------------------------: | :----------------------------------------------------------: | -| Linux (Ubuntu 20.04 x86_64) | gcc-{7, 8, 9, 10} | [![Linux GCC Debug](https://github.com/boostorg/ublas/workflows/Linux%20GCC%20Debug/badge.svg?branch=master)](https://github.com/boostorg/ublas/actions?query=workflow%3A%22Linux+GCC%22+branch%3Amaster) | [![Linux GCC Debug](https://github.com/boostorg/ublas/workflows/Linux%20GCC%20Debug/badge.svg?branch=develop)](https://github.com/boostorg/ublas/actions?query=workflow%3A%22Linux+GCC%22+branch%3Adevelop) | -| Linux (Ubuntu 20.04 x86_64) | clang-{6, 8, 9, 10} | [![Linux Clang Release](https://github.com/boostorg/ublas/workflows/Linux%20Clang%20Release/badge.svg?branch=master)](https://github.com/boostorg/ublas/actions?query=workflow%3A%22Linux+Clang%22+branch%3Amaster) | [![Linux Clang Release](https://github.com/boostorg/ublas/workflows/Linux%20Clang%20Release/badge.svg?branch=develop)](https://github.com/boostorg/ublas/actions?query=workflow%3A%22Linux+Clang%22+branch%3Adevelop) | -| Windows 10 (x86_64) | msvc-{14.16, 14.26} | [![Windows MSVC](https://github.com/boostorg/ublas/workflows/Windows%20MSVC/badge.svg?branch=master)](https://github.com/boostorg/ublas/actions?query=workflow%3A%22Windows+MSVC%22+branch%3Amaster) | [![Windows MSVC](https://github.com/boostorg/ublas/workflows/Windows%20MSVC/badge.svg?branch=develop)](https://github.com/boostorg/ublas/actions?query=workflow%3A%22Windows+MSVC%22+branch%3Adevelop) | -| MacOS Catalina (x86_64) | clang-11 | [![Apple Clang](https://github.com/boostorg/ublas/workflows/Apple%20Clang/badge.svg?branch=master)](https://github.com/boostorg/ublas/actions?query=workflow%3A%22Apple+Clang%22+branch%3Amaster) | [![Apple Clang](https://github.com/boostorg/ublas/workflows/Apple%20Clang/badge.svg?branch=develop)](https://github.com/boostorg/ublas/actions?query=workflow%3A%22Apple+Clang%22+branch%3Adevelop) | - -#### Tensor Additional Checks - -| Checks | [`master`](https://github.com/boostorg/ublas/tree/master) | [`develop`](https://github.com/boostorg/ublas/tree/develop) | -| :-----------: | :----------------------------------------------------------: | :----------------------------------------------------------: | -| UB Sanitizer | [![Undefined Behaviour Sanitizer](https://github.com/boostorg/ublas/workflows/Undefined%20Behaviour%20Sanitizer/badge.svg?branch=master)](https://github.com/boostorg/ublas/actions?query=workflow%3A%22Undefined+Behaviour+Sanitizer%22+branch%3Amaster) | [![Undefined Behaviour Sanitizer](https://github.com/boostorg/ublas/workflows/Undefined%20Behaviour%20Sanitizer/badge.svg?branch=develop)](https://github.com/boostorg/ublas/actions?query=workflow%3A%22Undefined+Behaviour+Sanitizer%22+branch%3Adevelop) | -| TH Sanitizer | [![Thread Sanitizer](https://github.com/boostorg/ublas/workflows/Thread%20Sanitizer/badge.svg?branch=master)](https://github.com/boostorg/ublas/actions?query=workflow%3A%22Thread+Sanitizer%22+branch%3Amaster) | [![Thread 
Sanitizer](https://github.com/boostorg/ublas/workflows/Thread%20Sanitizer/badge.svg?branch=develop)](https://github.com/boostorg/ublas/actions?query=workflow%3A%22Thread+Sanitizer%22+branch%3Adevelop) | -| ADD Sanitizer | [![Address Sanitizer](https://github.com/boostorg/ublas/workflows/Address%20Sanitizer/badge.svg?branch=master)](https://github.com/boostorg/ublas/actions?query=workflow%3A%22Address+Sanitizer%22+branch%3Amaster) | [![Address Sanitizer](https://github.com/boostorg/ublas/workflows/Address%20Sanitizer/badge.svg?branch=develop)](https://github.com/boostorg/ublas/actions?query=workflow%3A%22Address+Sanitizer%22+branch%3Adevelop) | -| Codecov | [![codecov](https://codecov.io/gh/boostorg/ublas/branch/master/graph/badge.svg)](https://codecov.io/gh/boostorg/ublas/branch/master) | [![codecov](https://codecov.io/gh/boostorg/ublas/branch/develop/graph/badge.svg)](https://codecov.io/gh/boostorg/ublas/branch/develop) | -| Clang-Format | [![Code Format](https://github.com/boostorg/ublas/workflows/Code%20Format/badge.svg?branch=master)](https://github.com/boostorg/ublas/actions?query=workflow%3A%22Code+Format%22+branch%3Amaster) | [![Code Format](https://github.com/boostorg/ublas/workflows/Code%20Format/badge.svg?branch=develop)](https://github.com/boostorg/ublas/actions?query=workflow%3A%22Code+Format%22+branch%3Adevelop) | -| Clang-Tidy | [![Clang tidy checks](https://github.com/boostorg/ublas/workflows/Clang%20tidy%20checks/badge.svg?branch=master)](https://github.com/boostorg/ublas/actions?query=workflow%3A%22Clang+tidy+checks%22+branch%3Amaster) | [![Clang tidy checks](https://github.com/boostorg/ublas/workflows/Clang%20tidy%20checks/badge.svg?branch=develop)](https://github.com/boostorg/ublas/actions?query=workflow%3A%22Clang+tidy+checks%22+branch%3Adevelop) | +## Simple Example -#### Tensor Build Information +```cpp +#include <boost/numeric/ublas/tensor.hpp> +#include <iostream> -| OS | Toolchain | Compiler Flags | -| :----------------: | :-------------: | :-----------------------------------------------: | -| Linux Ubuntu 20.04 | GCC | `-O0` | -| Linux Ubuntu 20.04 | Clang | `-O3` | -| Windows 10 | MSVC | No Special Flags | -| MacOS Catalina | Clang | No Special Flags | -| UB Sanitizer | GCC and Clang | `"-g -fsanitize=undefined"` | -| TH Sanitizer | GCC and Clang | `"-g -fsanitize=thread -O2"` | -| ADD Sanitizer | GCC and Clang | `"-g -fsanitize=address -fno-omit-frame-pointer"` | -| Clang Tidy | Clang-Tidy-10 | [Configuration File](.clang-tidy) | -| Clang Format | Clang-Format-10 | [Configuration File](.clang-format) | +int main() +{ + using namespace boost::numeric::ublas::index; + using tensor = boost::numeric::ublas::tensor_dynamic; + auto ones = boost::numeric::ublas::ones{}; -#### uBLAS CI + tensor A = ones(3,4,5); + tensor B = ones(4,6,3,2); -Branch | Travis | Appveyor | Regression | Docs -:-----: | ------ | --------- | ----------- | ----- [`master`](https://github.com/boostorg/ublas/tree/master) | [![Build Status](https://travis-ci.org/boostorg/ublas.svg?branch=master)](https://travis-ci.org/boostorg/ublas) | [![Build status](https://ci.appveyor.com/api/projects/status/ctu3wnfowa627ful/branch/master?svg=true)](https://ci.appveyor.com/project/stefanseefeld/ublas/branch/master) | [![ublas](https://img.shields.io/badge/ublas-master-blue.svg)](https://www.boost.org/development/tests/master/developer/numeric-ublas.html) | [![Documentation](https://img.shields.io/badge/docs-develop-brightgreen.svg)](http://www.boost.org/doc/libs/release/libs/numeric) - [`develop`](https://github.com/boostorg/ublas/tree/develop) | 
[![Build Status](https://travis-ci.org/boostorg/ublas.svg?branch=develop)](https://travis-ci.org/boostorg/ublas) | [![Build status](https://ci.appveyor.com/api/projects/status/ctu3wnfowa627ful/branch/develop?svg=true)](https://ci.appveyor.com/project/stefanseefeld/ublas/branch/develop) | [![ublas](https://img.shields.io/badge/ublas-develop-blue.svg)](https://www.boost.org/development/tests/develop/developer/numeric-ublas.html) | [![Documentation](https://img.shields.io/badge/docs-develop-brightgreen.svg)](http://www.boost.org/doc/libs/release/libs/numeric) + tensor C = 2*ones(5,6,2) + A(_i,_j,_k)*B(_j,_l,_i,_m) + 5; + + // Matlab Compatible Formatted Output + std::cout << "C=" << C << ";" << std::endl; +} +``` ## Directories @@ -79,7 +68,7 @@ Branch | Travis | Appveyor | Regression | Docs ## More information -* If you would like to test the library, contribute new feature or a bug fix, see [contribution](https://github.com/boostorg/ublas/wiki/Guidelines-for-Contribution) where the whole development infrastructure and the contributing workflow is explained in details. +* If you would like to test the library, contribute new feature or a bug fix, see [contribution](https://github.com/boostorg/ublas/wiki/Guidelines-for-Contribution). * Ask questions in [stackoverflow](http://stackoverflow.com/questions/ask?tags=c%2B%2B,boost,boost-ublas) with `boost-ublas` or `ublas` tags. * Report [bugs](https://github.com/boostorg/ublas/issues) and be sure to mention Boost version, platform and compiler you're using. A small compilable code sample to reproduce the problem is always good as well. * Submit your patches as pull requests against **develop** branch. Note that by submitting patches you agree to license your modifications under the [Boost Software License, Version 1.0](http://www.boost.org/LICENSE_1_0.txt). diff --git a/examples/tensor/.clang-tidy b/examples/tensor/.clang-tidy deleted file mode 100644 index 50e8131ae..000000000 --- a/examples/tensor/.clang-tidy +++ /dev/null @@ -1,11 +0,0 @@ ---- -Checks: '-*,modernize-*,cppcoreguidelines-*,openmp-*,bugprone-*,performance-*,portability-*,readability-*,-modernize-use-trailing-return-type,-cppcoreguidelines-pro-bounds-pointer-arithmetic,-readability-uppercase-literal-suffix,-readability-braces-around-statements,-cppcoreguidelines-avoid-magic-numbers,-readability-magic-numbers,-bugprone-exception-escape' -WarningsAsErrors: '-*,modernize-*,cppcoreguidelines-*,openmp-*,bugprone-*,performance-*,portability-*,readability-*,-modernize-use-trailing-return-type,-cppcoreguidelines-pro-bounds-pointer-arithmetic,-readability-uppercase-literal-suffix,-readability-braces-around-statements,-cppcoreguidelines-avoid-magic-numbers,-readability-magic-numbers,-bugprone-exception-escape' -HeaderFilterRegex: 'boost\/numeric\/ublas\/tensor\/.*' -AnalyzeTemporaryDtors: false -FormatStyle: file -User: ublas-developers -CheckOptions: - - key: modernize-use-nullptr.NullMacros - value: 'NULL' -... 
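The updated example programs that follow build their inputs with generator objects (`ones`, `zeros`, `ones_static_rank`, `ones_static`), presumably provided by the new `tensor/function/init.hpp` header, rather than with fill-value constructors. A small self-contained sketch of that usage pattern, assembled from the calls visible in the diffs below; the two include directives and the `<float>` argument of `tensor_dynamic` are assumptions (the patch text preserves neither the original include lines nor template argument lists), and `ones`/`zeros` are written without template arguments exactly as they appear in the patch:

```cpp
#include <boost/numeric/ublas/tensor.hpp>  // assumed umbrella header
#include <iostream>

int main()
{
  namespace ublas = boost::numeric::ublas;
  using tensor = ublas::tensor_dynamic<float>;  // element type assumed

  // Generator objects as used throughout the updated examples; template
  // arguments, if the library requires any, are not shown in the patch text.
  constexpr auto ones  = ublas::ones{};
  constexpr auto zeros = ublas::zeros{};

  tensor A = zeros(3, 4, 2);     // 3x4x2 tensor, all elements 0
  tensor B = 2 * ones(3, 4, 2);  // 3x4x2 tensor, all elements 2
  tensor C = 3 * B + A;          // element-wise expression templates

  std::cout << "C=" << C << ";" << std::endl;  // Matlab-compatible formatted output
}
```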
diff --git a/examples/tensor/access_tensor.cpp b/examples/tensor/access_tensor.cpp index ebd7b2fc6..97e797fb8 100644 --- a/examples/tensor/access_tensor.cpp +++ b/examples/tensor/access_tensor.cpp @@ -15,89 +15,102 @@ #include +//NOLINTNEXTLINE int main() { - using namespace boost::numeric::ublas; - using namespace boost::multiprecision; - - { - using value_t = float; - using format_t = boost::numeric::ublas::layout::first_order; // storage format - using tensor_t = boost::numeric::ublas::dynamic_tensor; - - // creates a three-dimensional tensor with extents 3,4 and 2 - // tensor A stores single-precision floating-point number according - // to the first-order storage format - - auto A = tensor_t{3,4,2}; - - // initializes the tensor with increasing values along the first-index - // using a single index. - auto vf = 1.0f; - for(auto i = 0u; i < A.size(); ++i, vf += 1.0f) - A[i] = vf; - - // formatted output - std::cout << "% --------------------------- " << std::endl; - std::cout << "% --------------------------- " << std::endl << std::endl; - std::cout << "A=" << A << ";" << std::endl << std::endl; - } - - - { - using value_t = std::complex; - using format_t = boost::numeric::ublas::layout::last_order; // storage format - using tensor_t = boost::numeric::ublas::dynamic_tensor; - using shape_t = typename tensor_t::extents_type; - - // creates a four-dimensional tensor with extents 5,4,3 and 2 - // tensor A stores complex floating-point extended double precision numbers - // according to the last-order storage format - // and initializes it with the default value. - - auto B = tensor_t(shape_t{5,4,3,2},value_t{}); - - // initializes the tensor with increasing values along the last-index - // using a single-index - auto vc = value_t(0,0); - for(auto i = 0u; i < B.size(); ++i, vc += value_t(1,1)) - B[i] = vc; - - // formatted output - std::cout << "% --------------------------- " << std::endl; - std::cout << "% --------------------------- " << std::endl << std::endl; - std::cout << "B=" << B << ";" << std::endl << std::endl; - - - auto C = tensor_t(B.extents()); - // computes the complex conjugate of elements of B - // using multi-index notation. - for(auto i = 0u; i < B.size(0); ++i) - for(auto j = 0u; j < B.size(1); ++j) - for(auto k = 0u; k < B.size(2); ++k) - for(auto l = 0u; l < B.size(3); ++l) - C.at(i,j,k,l) = std::conj(B.at(i,j,k,l)); - - std::cout << "% --------------------------- " << std::endl; - std::cout << "% --------------------------- " << std::endl << std::endl; - std::cout << "C=" << C << ";" << std::endl << std::endl; - - - - // computes the complex conjugate of elements of B - // using iterators. - auto D = tensor_t(B.extents()); - std::transform(B.begin(), B.end(), D.begin(), [](auto const& b){ return std::conj(b); }); - std::cout << "% --------------------------- " << std::endl; - std::cout << "% --------------------------- " << std::endl << std::endl; - std::cout << "D=" << D << ";" << std::endl << std::endl; - - // reshaping tensors. 
- auto new_extents = B.extents().base(); - std::next_permutation( new_extents.begin(), new_extents.end() ); - D.reshape( extents<>(new_extents) ); - std::cout << "% --------------------------- " << std::endl; - std::cout << "% --------------------------- " << std::endl << std::endl; - std::cout << "newD=" << D << ";" << std::endl << std::endl; - } + namespace ublas = boost::numeric::ublas; + + try { + using value = float; + using layout = ublas::layout::first_order; // storage format + using tensor = ublas::tensor_dynamic; +// constexpr auto ones = ublas::ones{}; + constexpr auto zeros = ublas::zeros{}; + + // creates a three-dimensional tensor with extents 3,4 and 2 + // tensor A stores single-precision floating-point number according + // to the first-order storage format + + tensor A = zeros(3,4,2); + + // initializes the tensor with increasing values along the first-index + // using a single index. + auto vf = 1.0f; + for(auto i = 0u; i < A.size(); ++i, vf += 1.0f) + A[i] = vf; + + // formatted output + std::cout << "% --------------------------- " << std::endl; + std::cout << "% --------------------------- " << std::endl << std::endl; + std::cout << "A=" << A << ";" << std::endl << std::endl; + } catch (const std::exception& e) { + std::cerr << "Cought exception " << e.what(); + std::cerr << "in the main function of access-tensor." << std::endl; + } + + + try { + using value = std::complex; + using layout = ublas::layout::last_order; // storage format + using tensor = ublas::tensor_dynamic; + using shape = typename tensor::extents_type; + constexpr auto zeros = ublas::zeros{}; + + + // creates a four-dimensional tensor with extents 5,4,3 and 2 + // tensor A stores complex floating-point extended double precision numbers + // according to the last-order storage format + // and initializes it with the default value. + + //NOLINTNEXTLINE + tensor B = zeros(5,4,3,2); + + // initializes the tensor with increasing values along the last-index + // using a single-index + auto vc = value(0,0); + for(auto i = 0u; i < B.size(); ++i, vc += value(1,1)) + B[i] = vc; + + // formatted output + std::cout << "% --------------------------- " << std::endl; + std::cout << "% --------------------------- " << std::endl << std::endl; + std::cout << "B=" << B << ";" << std::endl << std::endl; + + + auto C = tensor(B.extents()); + // computes the complex conjugate of elements of B + // using multi-index notation. + for(auto i = 0u; i < B.size(0); ++i) + for(auto j = 0u; j < B.size(1); ++j) + for(auto k = 0u; k < B.size(2); ++k) + for(auto l = 0u; l < B.size(3); ++l) + C.at(i,j,k,l) = std::conj(B.at(i,j,k,l)); + + std::cout << "% --------------------------- " << std::endl; + std::cout << "% --------------------------- " << std::endl << std::endl; + std::cout << "C=" << C << ";" << std::endl << std::endl; + + + + // computes the complex conjugate of elements of B + // using iterators. + auto D = tensor(B.extents()); + std::transform(B.begin(), B.end(), D.begin(), [](auto const& b){ return std::conj(b); }); + std::cout << "% --------------------------- " << std::endl; + std::cout << "% --------------------------- " << std::endl << std::endl; + std::cout << "D=" << D << ";" << std::endl << std::endl; + + // reshaping tensors. 
+ auto new_extents = B.extents().base(); + std::next_permutation( new_extents.begin(), new_extents.end() ); + auto E = reshape( D, shape(new_extents) ); + std::cout << "% --------------------------- " << std::endl; + std::cout << "% --------------------------- " << std::endl << std::endl; + std::cout << "E=" << E << ";" << std::endl << std::endl; + + + } catch (const std::exception& e) { + std::cerr << "Cought exception " << e.what(); + std::cerr << "in the main function of access-tensor." << std::endl; + } } diff --git a/examples/tensor/instantiate_tensor.cpp b/examples/tensor/instantiate_tensor.cpp index bb3d8a11e..851716c8d 100644 --- a/examples/tensor/instantiate_tensor.cpp +++ b/examples/tensor/instantiate_tensor.cpp @@ -15,79 +15,107 @@ #include #include -void instantiate_dynamic_tensor() +void instantiate_tensor_dynamic() { + namespace ublas = boost::numeric::ublas; + using value = float; + using layout = ublas::layout::first_order; // storage format + using tensor = boost::numeric::ublas::tensor_dynamic; + constexpr auto ones = ublas::ones{}; - using value_t = float; - using format_t = boost::numeric::ublas::layout::first_order; // storage format - using tensor_t = boost::numeric::ublas::dynamic_tensor; - using shape_t = typename tensor_t::extents_type; - - // tensor type has dynamic order and dimensions + + try { + // tensor is resizable has dynamic dimensions // elements are stored contiguously in memory using the 1st-format (column-major) - auto t1 = tensor_t{3,4,2}; + tensor t1 = ones(3,4,2); std::cout << "t1 = " << t1 << std::endl; - auto t2 = tensor_t(shape_t{3,4,2},2.0F); + tensor t2 = 2 * ones(3,4,2); std::cout << "t2 = " << t2 << std::endl; - auto t3 = tensor_t(t2); + tensor t3 = 3*t2 + t1; std::cout << "t3 = " << t3 << std::endl; + + } catch (const std::exception& e) { + std::cerr << "Cought exception " << e.what(); + std::cerr << "in the instantiate_tensor_dynamic function of instantiate-tensor." << std::endl; + throw; + } } -void instantiate_dynamic_tensors_with_static_order() +void instantiate_tensor_dynamics_with_static_order() { + namespace ublas = boost::numeric::ublas; + using value = float; + using layout = boost::numeric::ublas::layout::first_order; // storage format + using tensor = boost::numeric::ublas::tensor_static_rank; + constexpr auto ones = ublas::ones_static_rank{}; - constexpr auto order = 3U; - using value_t = float; - using format_t = boost::numeric::ublas::layout::first_order; // storage format - using tensor_t = boost::numeric::ublas::fixed_rank_tensor; - using shape_t = typename tensor_t::extents_type; + try { + // tensor type has static order and dynamic dimensions + // elements are stored contiguously in memory using the 1st-format (column-major) - // tensor type has static order and dynamic dimensions - // elements are stored contiguously in memory using the 1st-format (column-major) + auto t1 = ones(3,4,2); + std::cout << "t1 = " << t1 << std::endl; - auto t1 = tensor_t{3,4,2}; - std::cout << "t1 = " << t1 << std::endl; + tensor t2 = 2*ones(3,4,2); + std::cout << "t2 = " << t2 << std::endl; - auto t2 = tensor_t(shape_t{3,4,2},2.0F); - std::cout << "t2 = " << t2 << std::endl; + tensor t3 = 3*t2 + t1; + std::cout << "t3 = " << t3 << std::endl; - auto t3 = tensor_t(t2); - std::cout << "t3 = " << t3 << std::endl; + } catch (const std::exception& e) { + std::cerr << "Cought exception " << e.what(); + std::cerr << "in the instantiate_tensor_dynamic function of instantiate-tensor." 
<< std::endl; + throw; + } } -void instantiate_static_tensor() +void instantiate_tensor_static() { - using value_t = float; - using format_t = boost::numeric::ublas::layout::first_order; // storage format - using shape_t = boost::numeric::ublas::static_extents<3U,4U,2U>; - using tensor_t = boost::numeric::ublas::static_tensor; + namespace ublas = boost::numeric::ublas; + using value = float; + using layout = ublas::layout::first_order; // storage format + using shape = ublas::extents<3,4,2>; + using tensor = ublas::tensor_static; + constexpr auto ones = ublas::ones_static{}; - // tensor type has static order and static dimensions - // elements are stored contiguously in memory using the 1st-format (column-major) + try { + // tensor type has static order and static dimensions + // elements are stored contiguously in memory using the 1st-format (column-major) - auto t1 = tensor_t{}; - std::cout << "t1 = " << t1 << std::endl; + auto t1 = tensor{}; + std::cout << "t1 = " << t1 << std::endl; + + tensor t2 = 2 * ones(shape{}); + std::cout << "t2 = " << t2 << std::endl; - auto t2 = tensor_t(2.0F); - std::cout << "t2 = " << t2 << std::endl; + tensor t3 = 3*t2 + t1; + std::cout << "t3 = " << t3 << std::endl; - auto t3 = tensor_t(t2); - std::cout << "t3 = " << t3 << std::endl; + } catch (const std::exception& e) { + std::cerr << "Cought exception " << e.what(); + std::cerr << "in the instantiate_tensor_dynamic function of instantiate-tensor." << std::endl; + throw; + } } int main() { - instantiate_dynamic_tensor(); - instantiate_dynamic_tensors_with_static_order(); - instantiate_static_tensor(); + try{ + instantiate_tensor_dynamic(); + instantiate_tensor_dynamics_with_static_order(); + instantiate_tensor_static(); + } catch (const std::exception& e) { + std::cerr << "Cought exception " << e.what(); + std::cerr << "in the main function of instantiate-tensor." << std::endl; + } } diff --git a/examples/tensor/multiply_tensors_einstein_notation.cpp b/examples/tensor/multiply_tensors_einstein_notation.cpp index 3609feb95..c7ba3c2c6 100644 --- a/examples/tensor/multiply_tensors_einstein_notation.cpp +++ b/examples/tensor/multiply_tensors_einstein_notation.cpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at @@ -18,131 +18,150 @@ int main() { - using namespace boost::numeric::ublas; - - using value_t = float; - using format_t = boost::numeric::ublas::layout::first_order; // storage format - using tensor_t = boost::numeric::ublas::dynamic_tensor; - using shape_t = typename tensor_t::extents_type; - - //using format_t = column_major; - //using value_t = float; - //using shape_t = dynamic_extents<>; - //using tensor_t = dynamic_tensor; - using matrix_t = matrix; - - - using namespace boost::numeric::ublas::index; - - // Tensor-Vector-Multiplications - Including Transposition - { - - auto n = shape_t{3,4,2}; - auto A = tensor_t(n,1); - auto B1 = matrix_t(n[1],n[2],2); - auto v1 = tensor_t(shape_t{n[0],1},2); - auto v2 = tensor_t(shape_t{n[1],1},2); -// auto v3 = tensor_t(shape{n[2],1},2); - - // C1(j,k) = B1(j,k) + A(i,j,k)*v1(i); - // tensor_t C1 = B1 + prod(A,vector_t(n[0],1),1); - tensor_t C1 = B1 + A(_i,_,_) * v1(_i,_); - - // C2(i,k) = A(i,j,k)*v2(j) + 4; - //tensor_t C2 = prod(A,vector_t(n[1],1),2) + 4; - tensor_t C2 = A(_,_i,_) * v2(_i,_) + 4; - - // not yet implemented! - // C3() = A(i,j,k)*T1(i)*T2(j)*T2(k); - // tensor_t C3 = prod(prod(prod(A,v1,1),v2,1),v3,1); - // tensor_t C3 = A(_i,_j,_k) * v1(_i,_) * v2(_j,_) * v3(_k,_); - - // formatted output - std::cout << "% --------------------------- " << std::endl; - std::cout << "% --------------------------- " << std::endl << std::endl; - std::cout << "% C1(j,k) = B1(j,k) + A(i,j,k)*v1(i);" << std::endl << std::endl; - std::cout << "C1=" << C1 << ";" << std::endl << std::endl; - - // formatted output - std::cout << "% --------------------------- " << std::endl; - std::cout << "% --------------------------- " << std::endl << std::endl; - std::cout << "% C2(i,k) = A(i,j,k)*v2(j) + 4;" << std::endl << std::endl; - std::cout << "C2=" << C2 << ";" << std::endl << std::endl; - - } - - - // Tensor-Matrix-Multiplications - Including Transposition - { - auto n = shape_t{3,4,2}; - auto m = 5u; - auto A = tensor_t(n,2); - auto B = tensor_t(shape_t{n[1],n[2],m},2); - auto B1 = tensor_t(shape_t{m,n[0]},1); - auto B2 = tensor_t(shape_t{m,n[1]},1); - - - // C1(l,j,k) = B(j,k,l) + A(i,j,k)*B1(l,i); - // tensor_t C1 = B + prod(A,B1,1); - tensor_t C1 = B + A(_i,_,_) * B1(_,_i); - - // C2(i,l,k) = A(i,j,k)*B2(l,j) + 4; - // tensor_t C2 = prod(A,B2) + 4; - tensor_t C2 = A(_,_j,_) * B2(_,_j) + 4; - - // C3(i,l1,l2) = A(i,j,k)*T1(l1,j)*T2(l2,k); - // not yet implemented. 
- - // formatted output - std::cout << "% --------------------------- " << std::endl; - std::cout << "% --------------------------- " << std::endl << std::endl; - std::cout << "% C1(l,j,k) = B(j,k,l) + A(i,j,k)*B1(l,i);" << std::endl << std::endl; - std::cout << "C1=" << C1 << ";" << std::endl << std::endl; - - // formatted output - std::cout << "% --------------------------- " << std::endl; - std::cout << "% --------------------------- " << std::endl << std::endl; - std::cout << "% C2(i,l,k) = A(i,j,k)*B2(l,j) + 4;" << std::endl << std::endl; - std::cout << "C2=" << C2 << ";" << std::endl << std::endl; - - // formatted output -// std::cout << "% --------------------------- " << std::endl; -// std::cout << "% --------------------------- " << std::endl << std::endl; -// std::cout << "% C3(i,l1,l2) = A(i,j,k)*T1(l1,j)*T2(l2,k);" << std::endl << std::endl; -// std::cout << "C3=" << C3 << ";" << std::endl << std::endl; - } - - - // Tensor-Tensor-Multiplications Including Transposition - { - auto na = shape_t{3,4,5}; - auto nb = shape_t{4,6,3,2}; - auto A = tensor_t(na,2); - auto B = tensor_t(nb,3); - auto T1 = tensor_t(shape_t{na[2],na[2]},2); - auto T2 = tensor_t(shape_t{na[2],nb[1],nb[3]},2); - - - // C1(j,l) = T1(j,l) + A(i,j,k)*A(i,j,l) + 5; - // tensor_t C1 = T1 + prod(A,A,perm_t{1,2}) + 5; - tensor_t C1 = T1 + A(_i,_j,_m)*A(_i,_j,_l) + 5; - - // formatted output - std::cout << "% --------------------------- " << std::endl; - std::cout << "% --------------------------- " << std::endl << std::endl; - std::cout << "% C1(k,l) = T1(k,l) + A(i,j,k)*A(i,j,l) + 5;" << std::endl << std::endl; - std::cout << "C1=" << C1 << ";" << std::endl << std::endl; - - - // C2(k,l,m) = T2(k,l,m) + A(i,j,k)*B(j,l,i,m) + 5; - //tensor_t C2 = T2 + prod(A,B,perm_t{1,2},perm_t{3,1}) + 5; - tensor_t C2 = T2 + A(_i,_j,_k)*B(_j,_l,_i,_m) + 5; - - // formatted output - std::cout << "% --------------------------- " << std::endl; - std::cout << "% --------------------------- " << std::endl << std::endl; - std::cout << "% C2(k,l,m) = T2(k,l,m) + A(i,j,k)*B(j,l,i,m) + 5;" << std::endl << std::endl; - std::cout << "C2=" << C2 << ";" << std::endl << std::endl; - - } + namespace ublas = boost::numeric::ublas; + using value = float; + using layout = ublas::layout::first_order; // storage format + using tensor = ublas::tensor_dynamic; + using shape = typename tensor::extents_type; + using matrix = ublas::matrix; + + constexpr auto ones = ublas::ones{}; + + // NOLINTNEXTLINE(google-build-using-namespace) + using namespace boost::numeric::ublas::index; + + using namespace boost::numeric::ublas::index; + using tensor = boost::numeric::ublas::tensor_dynamic; + auto fones = boost::numeric::ublas::ones{}; + + + tensor X = fones(3,4,5); + tensor Y = fones(4,6,3,2); + + tensor Z = 2*ones(5,6,2) + X(_i,_j,_k)*Y(_j,_l,_i,_m) + 5; + + // Matlab Compatible Formatted Output + std::cout << "C=" << Z << ";" << std::endl; + + + // Tensor-Vector-Multiplications - Including Transposition + try { + + auto n = shape{3,4,2}; + + tensor A = ones(n); + matrix B1 = 2*matrix(n[1],n[2]); + tensor v1 = 2*ones(n[0],1); + tensor v2 = 2*ones(n[1],1); + // auto v3 = tensor(shape{n[2],1},2); + + // C1(j,k) = B1(j,k) + A(i,j,k)*v1(i); + // tensor C1 = B1 + prod(A,vector_t(n[0],1),1); + tensor C1 = B1 + A(_i,_,_) * v1(_i,_); + + // C2(i,k) = A(i,j,k)*v2(j) + 4; + //tensor C2 = prod(A,vector_t(n[1],1),2) + 4; + tensor C2 = A(_,_i,_) * v2(_i,_) + 4; + + // not yet implemented! 
+ // C3() = A(i,j,k)*T1(i)*T2(j)*T2(k); + // tensor C3 = prod(prod(prod(A,v1,1),v2,1),v3,1); + // tensor C3 = A(_i,_j,_k) * v1(_i,_) * v2(_j,_) * v3(_k,_); + + // formatted output + std::cout << "% --------------------------- " << std::endl; + std::cout << "% --------------------------- " << std::endl << std::endl; + std::cout << "% C1(j,k) = B1(j,k) + A(i,j,k)*v1(i);" << std::endl << std::endl; + std::cout << "C1=" << C1 << ";" << std::endl << std::endl; + + // formatted output + std::cout << "% --------------------------- " << std::endl; + std::cout << "% --------------------------- " << std::endl << std::endl; + std::cout << "% C2(i,k) = A(i,j,k)*v2(j) + 4;" << std::endl << std::endl; + std::cout << "C2=" << C2 << ";" << std::endl << std::endl; + + } catch (const std::exception& e) { + std::cerr << "Cought exception " << e.what(); + std::cerr << "in the main function of multiply-tensor-einstein-notation when doing tensor-vector multiplication." << std::endl; + } + + // Tensor-Matrix-Multiplications - Including Transposition + try { + auto n = shape{3,4,2}; + auto m = 5u; + tensor A = 2*ones(n); + tensor B = 2*ones(n[1],n[2],m); + tensor B1 = ones(m,n[0]); + tensor B2 = ones(m,n[1]); + + + // C1(l,j,k) = B(j,k,l) + A(i,j,k)*B1(l,i); + // tensor C1 = B + prod(A,B1,1); + tensor C1 = B + A(_i,_,_) * B1(_,_i); + + // C2(i,l,k) = A(i,j,k)*B2(l,j) + 4; + // tensor C2 = prod(A,B2) + 4; + tensor C2 = A(_,_j,_) * B2(_,_j) + 4; + + // C3(i,l1,l2) = A(i,j,k)*T1(l1,j)*T2(l2,k); + // not yet implemented. + + // formatted output + std::cout << "% --------------------------- " << std::endl; + std::cout << "% --------------------------- " << std::endl << std::endl; + std::cout << "% C1(l,j,k) = B(j,k,l) + A(i,j,k)*B1(l,i);" << std::endl << std::endl; + std::cout << "C1=" << C1 << ";" << std::endl << std::endl; + + // formatted output + std::cout << "% --------------------------- " << std::endl; + std::cout << "% --------------------------- " << std::endl << std::endl; + std::cout << "% C2(i,l,k) = A(i,j,k)*B2(l,j) + 4;" << std::endl << std::endl; + std::cout << "C2=" << C2 << ";" << std::endl << std::endl; + + // formatted output + // std::cout << "% --------------------------- " << std::endl; + // std::cout << "% --------------------------- " << std::endl << std::endl; + // std::cout << "% C3(i,l1,l2) = A(i,j,k)*T1(l1,j)*T2(l2,k);" << std::endl << std::endl; + // std::cout << "C3=" << C3 << ";" << std::endl << std::endl; + } catch (const std::exception& e) { + std::cerr << "Cought exception " << e.what(); + std::cerr << "in the main function of multiply-tensor-einstein-notation when doing tensor-matrix multiplication." 
<< std::endl; + } + + + // Tensor-Tensor-Multiplications Including Transposition + try { + auto na = shape{3,4,5}; + auto nb = shape{4,6,3,2}; + tensor A = 2*ones(na); + tensor B = 3*ones(nb); + tensor T1 = 2*ones(na[2],na[2]); + tensor T2 = 2*ones(na[2],nb[1],nb[3]); + + + // C1(j,l) = T1(j,l) + A(i,j,k)*A(i,j,l) + 5; + // tensor C1 = T1 + prod(A,A,perm_t{1,2}) + 5; + tensor C1 = T1 + A(_i,_j,_m)*A(_i,_j,_l) + 5; + + // formatted output + std::cout << "% --------------------------- " << std::endl; + std::cout << "% --------------------------- " << std::endl << std::endl; + std::cout << "% C1(k,l) = T1(k,l) + A(i,j,k)*A(i,j,l) + 5;" << std::endl << std::endl; + std::cout << "C1=" << C1 << ";" << std::endl << std::endl; + + + // C2(k,l,m) = T2(k,l,m) + A(i,j,k)*B(j,l,i,m) + 5; + //tensor C2 = T2 + prod(A,B,perm_t{1,2},perm_t{3,1}) + 5; + tensor C2 = T2 + A(_i,_j,_k)*B(_j,_l,_i,_m) + 5; + + // formatted output + std::cout << "% --------------------------- " << std::endl; + std::cout << "% --------------------------- " << std::endl << std::endl; + std::cout << "% C2(k,l,m) = T2(k,l,m) + A(i,j,k)*B(j,l,i,m) + 5;" << std::endl << std::endl; + std::cout << "C2=" << C2 << ";" << std::endl << std::endl; + + } catch (const std::exception& e) { + std::cerr << "Cought exception " << e.what(); + std::cerr << "in the main function of multiply-tensor-einstein-notation when doing transpose." << std::endl; + } } diff --git a/examples/tensor/multiply_tensors_product_function.cpp b/examples/tensor/multiply_tensors_product_function.cpp index 31a895370..bd2adb34a 100644 --- a/examples/tensor/multiply_tensors_product_function.cpp +++ b/examples/tensor/multiply_tensors_product_function.cpp @@ -10,44 +10,44 @@ // Google and Fraunhofer IOSB, Ettlingen, Germany // - -#include #include +#include #include #include void multiply_tensors_with_dynamic_order() { - using namespace boost::numeric::ublas; + namespace ublas = boost::numeric::ublas; - using format_t = column_major; - using value_t = float; // std::complex; - using tensor_t = dynamic_tensor; - using shape_t = typename tensor_t::extents_type; - using matrix_t = matrix; - using vector_t = vector; + using layout = ublas::layout::first_order; + using value = float; // std::complex; + using tensor = ublas::tensor_dynamic; + using matrix = ublas::matrix; + using vector = ublas::vector; + using shape = typename tensor::extents_type; + constexpr auto ones = ublas::ones{}; // Tensor-Vector-Multiplications - Including Transposition - { + try { - auto n = shape_t{3,4,2}; - auto A = tensor_t(n,2); + auto n = shape{3,4,2}; + auto A = tensor(n,2); auto q = 0u; // contraction mode // C1(j,k) = T2(j,k) + A(i,j,k)*T1(i); q = 1u; - tensor_t C1 = matrix_t(n[1],n[2],2) + prod(A,vector_t(n[q-1],1),q); + tensor C1 = matrix(n[1],n[2],2) + ublas::prod(A,vector(n[q-1],1),q); // C2(i,k) = A(i,j,k)*T1(j) + 4; q = 2u; - tensor_t C2 = prod(A,vector_t(n[q-1],1),q) + 4; + tensor C2 = ublas::prod(A,vector(n[q-1],1),q) + 4; // C3() = A(i,j,k)*T1(i)*T2(j)*T2(k); - tensor_t C3 = prod(prod(prod(A,vector_t(n[0],1),1),vector_t(n[1],1),1),vector_t(n[2],1),1); + tensor C3 = ublas::prod(ublas::prod(ublas::prod(A,vector(n[0],1),1),vector(n[1],1),1),vector(n[2],1),1); // C4(i,j) = A(k,i,j)*T1(k) + 4; q = 1u; - tensor_t C4 = prod(trans(A,{2,3,1}),vector_t(n[2],1),q) + 4; + tensor C4 = ublas::prod(trans(A,{2,3,1}),vector(n[2],1),q) + 4; // formatted output @@ -74,35 +74,39 @@ void multiply_tensors_with_dynamic_order() std::cout << "% C4(i,j) = A(k,i,j)*T1(k) + 4;" << std::endl << std::endl; std::cout << 
"C4=" << C4 << ";" << std::endl << std::endl; + } catch (const std::exception& e) { + std::cerr << "Cought exception " << e.what(); + std::cerr << "in the main function of multiply-tensor-product-function." << std::endl; } + // Tensor-Matrix-Multiplications - Including Transposition - { + try { - auto n = shape_t{3,4,2}; - auto A = tensor_t(n,2); + auto n = shape{3,4,2}; + tensor A = 2*ones(n);//tensor auto m = 5u; auto q = 0u; // contraction mode // C1(l,j,k) = T2(l,j,k) + A(i,j,k)*T1(l,i); q = 1u; - tensor_t C1 = tensor_t(shape_t{m,n[1],n[2]},2) + prod(A,matrix_t(m,n[q-1],1),q); + tensor C1 = 2*ones(m,n[1],n[2]) + ublas::prod(A,matrix(m,n[q-1],1),q); // C2(i,l,k) = A(i,j,k)*T1(l,j) + 4; q = 2u; - tensor_t C2 = prod(A,matrix_t(m,n[q-1],1),q) + 4; + tensor C2 = ublas::prod(A,matrix(m,n[q-1],1),q) + 4; // C3(i,l1,l2) = A(i,j,k)*T1(l1,j)*T2(l2,k); q = 3u; - tensor_t C3 = prod(prod(A,matrix_t(m+1,n[q-2],1),q-1),matrix_t(m+2,n[q-1],1),q); + tensor C3 = ublas::prod(ublas::prod(A,matrix(m+1,n[q-2],1),q-1),matrix(m+2,n[q-1],1),q); // C4(i,l1,l2) = A(i,j,k)*T2(l2,k)*T1(l1,j); - tensor_t C4 = prod(prod(A,matrix_t(m+2,n[q-1],1),q),matrix_t(m+1,n[q-2],1),q-1); + tensor C4 = ublas::prod(ublas::prod(A,matrix(m+2,n[q-1],1),q),matrix(m+1,n[q-2],1),q-1); // C5(i,k,l) = A(i,k,j)*T1(l,j) + 4; q = 3u; - tensor_t C5 = prod(trans(A,{1,3,2}),matrix_t(m,n[1],1),q) + 4; + tensor C5 = ublas::prod(trans(A,{1,3,2}),matrix(m,n[1],1),q) + 4; // formatted output std::cout << "% --------------------------- " << std::endl; @@ -135,6 +139,9 @@ void multiply_tensors_with_dynamic_order() std::cout << "% --------------------------- " << std::endl << std::endl; std::cout << "% C5(i,k,l) = A(i,k,j)*T1(l,j) + 4;" << std::endl << std::endl; std::cout << "C5=" << C5 << ";" << std::endl << std::endl; + } catch (const std::exception& e) { + std::cerr << "Cought exception " << e.what(); + std::cerr << "in the multiply_tensors_with_dynamic_order function of multiply-tensor-product-function." 
<< std::endl; } @@ -142,18 +149,18 @@ void multiply_tensors_with_dynamic_order() // Tensor-Tensor-Multiplications Including Transposition - { + try { using perm_t = std::vector; - auto na = shape_t{3,4,5}; - auto nb = shape_t{4,6,3,2}; - auto A = tensor_t(na,2); - auto B = tensor_t(nb,3); + auto na = shape{3,4,5}; + auto nb = shape{4,6,3,2}; + tensor A = 2*ones(na); //tensor(na,2); + tensor B = 3*ones(nb); //tensor(nb,3); // C1(j,l) = T(j,l) + A(i,j,k)*A(i,j,l) + 5; - tensor_t C1 = tensor_t(shape_t{na[2],na[2]},2) + prod(A,A,perm_t{1,2}) + 5.0F; + tensor C1 = 2*ones(na[2],na[2]) + ublas::prod(A,A,perm_t{1,2}) + 5; // formatted output std::cout << "% --------------------------- " << std::endl; @@ -163,7 +170,7 @@ void multiply_tensors_with_dynamic_order() // C2(k,l,m) = T(k,l,m) + A(i,j,k)*B(j,l,i,m) + 5; - tensor_t C2 = tensor_t(shape_t{na[2],nb[1],nb[3]},2) + prod(A,B,perm_t{1,2},perm_t{3,1}) + 5; + tensor C2 = 2*ones(na[2],nb[1],nb[3]) + ublas::prod(A,B,perm_t{1,2},perm_t{3,1}) + 5; // formatted output std::cout << "% --------------------------- " << std::endl; @@ -173,7 +180,7 @@ void multiply_tensors_with_dynamic_order() // C3(k,l,m) = T(k,l,m) + A(i,j,k)*trans(B(j,l,i,m),{2,3,1,4})+ 5; - tensor_t C3 = tensor_t(shape_t{na[2],nb[1],nb[3]},2) + prod(A,trans(B,{2,3,1,4}),perm_t{1,2}) + 5; + tensor C3 = 2*ones(na[2],nb[1],nb[3]) + ublas::prod(A,trans(B,{2,3,1,4}),perm_t{1,2}) + 5; // formatted output std::cout << "% --------------------------- " << std::endl; @@ -181,44 +188,48 @@ void multiply_tensors_with_dynamic_order() std::cout << "% C3(k,l,m) = T(k,l,m) + A(i,j,k)*trans(B(j,l,i,m),{2,3,1,4})+ 5;" << std::endl << std::endl; std::cout << "C3=" << C3 << ";" << std::endl << std::endl; + } catch (const std::exception& e) { + std::cerr << "Cought exception " << e.what(); + std::cerr << "in the main function of multiply-tensor-product-function." 
<< std::endl; } } void multiply_tensors_with_static_order() { - using namespace boost::numeric::ublas; - - using format_t = column_major; - using value_t = float; // std::complex; - using matrix_t = matrix; - using vector_t = vector; - using tensor2_t = fixed_rank_tensor; - using tensor3_t = fixed_rank_tensor; -// using tensor4_t = fixed_rank_tensor; -// using shape_t = typename tensor_t::extents_type; -// using shape2_t = typename tensor2_t::extents_type; - using shape3_t = typename tensor3_t::extents_type; -// using shape4_t = typename tensor4_t::extents_type; + namespace ublas = boost::numeric::ublas; + + using layout = ublas::layout::first_order; + using value = float; // std::complex; + using matrix = ublas::matrix; + using vector = ublas::vector; + using tensor2 = ublas::tensor_static_rank; + using tensor3 = ublas::tensor_static_rank; + using tensor4 = ublas::tensor_static_rank; + using shape2 = typename tensor2::extents_type; + using shape3 = typename tensor3::extents_type; + using shape4 = typename tensor4::extents_type; + + constexpr auto ones = ublas::ones_static_rank{}; // Tensor-Vector-Multiplications - Including Transposition // dynamic_extents with static rank - { + try { - auto n = shape3_t{3,4,2}; - auto A = tensor3_t(n,value_t(2)); + auto n = shape3{3,4,2}; + tensor3 A = 2*ones(n); auto q = 0U; // contraction mode // C1(j,k) = T2(j,k) + A(i,j,k)*T1(i); q = 1U; - tensor2_t C1 = matrix_t(n[1],n[2],2) + prod(A,vector_t(n[q-1],1),q); + tensor2 C1 = matrix(n[1],n[2],2) + ublas::prod(A,vector(n[q-1],1),q); // C2(i,k) = A(i,j,k)*T1(j) + 4; q = 2U; - tensor2_t C2 = prod(A,vector_t(n[q-1],1),q) + 4; + tensor2 C2 = ublas::prod(A,vector(n[q-1],1),q) + 4; // C3() = A(i,j,k)*T1(i)*T2(j)*T2(k); - tensor2_t C3 = prod(prod(prod(A,vector_t(n[0],1),1),vector_t(n[1],1),1),vector_t(n[2],1),1); + tensor2 C3 = ublas::prod(ublas::prod(ublas::prod(A,vector(n[0],1),1),vector(n[1],1),1),vector(n[2],1),1); // formatted output @@ -239,31 +250,34 @@ void multiply_tensors_with_static_order() std::cout << "% C3() = A(i,j,k)*T1(i)*T2(j)*T2(k);" << std::endl << std::endl; std::cout << "C3()=" << C3(0) << ";" << std::endl << std::endl; + } catch (const std::exception& e) { + std::cerr << "Cought exception " << e.what(); + std::cerr << "in the main function of multiply-tensor-product-function." 
<< std::endl; } // Tensor-Matrix-Multiplications - Including Transposition // dynamic_extents with static rank - { + try { - auto n = shape3_t{3,4,2}; - auto A = tensor3_t(n,value_t(2)); + auto n = shape3{3,4,2}; + tensor3 A = 2*ones(n); auto m = 5U; auto q = 0U; // contraction mode // C1(l,j,k) = T2(l,j,k) + A(i,j,k)*T1(l,i); q = 1U; - tensor3_t C1 = tensor3_t( shape3_t{m,n[1],n[2]},value_t(2) ) + prod(A,matrix_t(m,n[q-1],1),q); + tensor3 C1 = 2*ones(m,n[1],n[2]) + ublas::prod(A,matrix(m,n[q-1],1),q); // C2(i,l,k) = A(i,j,k)*T1(l,j) + 4; q = 2U; - tensor3_t C2 = prod(A,matrix_t(m,n[q-1],1),q) + 4 ; + tensor3 C2 = ublas::prod(A,matrix(m,n[q-1],1),q) + 4 ; // C3(i,l1,l2) = A(i,j,k)*T1(l1,j)*T2(l2,k); q = 3U; - tensor3_t C3 = prod(prod(A,matrix_t(m+1,n[q-2],1),q-1),matrix_t(m+2,n[q-1],1),q) ; + tensor3 C3 = ublas::prod(ublas::prod(A,matrix(m+1,n[q-2],1),q-1),matrix(m+2,n[q-1],1),q) ; // C4(i,l1,l2) = A(i,j,k)*T2(l2,k)*T1(l1,j); - tensor3_t C4 = prod(prod(A,matrix_t(m+2,n[q-1],1),q),matrix_t(m+1,n[q-2],1),q-1) ; + tensor3 C4 = ublas::prod(ublas::prod(A,matrix(m+2,n[q-1],1),q),matrix(m+1,n[q-2],1),q-1) ; // formatted output std::cout << "% --------------------------- " << std::endl; @@ -288,57 +302,69 @@ void multiply_tensors_with_static_order() std::cout << "% --------------------------- " << std::endl << std::endl; std::cout << "% C4(i,l1,l2) = A(i,j,k)*T2(l2,k)*T1(l1,j);" << std::endl << std::endl; std::cout << "C4=" << C4 << ";" << std::endl << std::endl; - std::cout << "% C3 and C4 should have the same values, true? " << std::boolalpha << (C3 == C4) << "!" << std::endl; + //std::cout << "% C3 and C4 should have the same values, true? " << std::boolalpha << (C3 == C4) << "!" << std::endl; + } catch (const std::exception& e) { + std::cerr << "Cought exception " << e.what(); + std::cerr << "in the main function of multiply-tensor-product-function." 
<< std::endl; } // Tensor-Tensor-Multiplications Including Transposition // dynamic_extents with static rank - { + try { -// using perm_t = std::array; + using perm_t = std::array; -// auto na = shape3_t{3,4,5}; -// auto nb = shape4_t{4,6,3,2}; -// auto nc = shape2_t{5,5}; -// auto A = tensor3_t(na,2.0F); -// auto B = tensor4_t(nb,3.0F); -// auto C = tensor2_t(nc,2.0F); + auto na = shape3{3,4,5}; + auto nb = shape4{4,6,3,2}; + auto nc = shape2{5,5}; + tensor3 A = 2*ones(na); + tensor4 B = 3*ones(nb); + tensor2 C = 2*ones(nc); // C1(j,l) = T(j,l) + A(i,j,k)*A(i,j,l) + 5; // Right now there exist no tensor other than dynamic_extents with // dynamic rank so every tensor times tensor operator automatically // to dynamic tensor -// auto C1 = C + prod(A,A,perm_t{1,2}) + 5.0F; + auto C1 = C + ublas::prod(A,A,perm_t{1,2}) + 5; std::cout << "% --------------------------- " << std::endl; std::cout << "% --------------------------- " << std::endl << std::endl; std::cout << "% C1(k,l) = T(k,l) + A(i,j,k)*A(i,j,l) + 5;" << std::endl << std::endl; -// std::cout << "C1=" << tensor_t(C1) << ";" << std::endl << std::endl; + std::cout << "C1=" << tensor2(C1) << ";" << std::endl << std::endl; // C2(k,l,m) = T(k,l,m) + A(i,j,k)*B(j,l,i,m) + 5; // Similar Problem as above -// tensor_t C2 = tensor_t(shape_t{na[2],nb[1],nb[3]},2.0F) + prod(A,B,perm_t{1,2},perm_t{3,1}) + 5.0F; + tensor3 C2 = 2*ones(na[2],nb[1],nb[3]) + ublas::prod(A,B,perm_t{1,2},perm_t{3,1}) + 5; std::cout << "% --------------------------- " << std::endl; std::cout << "% --------------------------- " << std::endl << std::endl; std::cout << "% C2(k,l,m) = T(k,l,m) + A(i,j,k)*B(j,l,i,m) + 5;" << std::endl << std::endl; - //std::cout << "C2=" << C2 << ";" << std::endl << std::endl; + std::cout << "C2=" << C2 << ";" << std::endl << std::endl; // C3(k,l,m) = T(k,l,m) + A(i,j,k)*trans(B(j,l,i,m),{2,3,1,4})+ 5; // Similar Problem as above -// tensor_t C3 = tensor_t(shape_t{na[2],nb[1],nb[3]},2.0F) + prod(A,trans(B,{2,3,1,4}),perm_t{1,2}) + 5.0F; + tensor3 C3 = 2*ones(na[2],nb[1],nb[3]) + ublas::prod(A,trans(B,{2,3,1,4}),perm_t{1,2}) + 5; std::cout << "% --------------------------- " << std::endl; std::cout << "% --------------------------- " << std::endl << std::endl; std::cout << "% C3(k,l,m) = T(k,l,m) + A(i,j,k)*trans(B(j,l,i,m),{2,3,1,4})+ 5;" << std::endl << std::endl; -// std::cout << "C3=" << C3 << ";" << std::endl << std::endl; + std::cout << "C3=" << C3 << ";" << std::endl << std::endl; + } catch (const std::exception& e) { + std::cerr << "Cought exception " << e.what(); + std::cerr << "in the multiply_tensors_with_static_order function of multiply-tensor-product-function." << std::endl; + throw; } } int main() { - multiply_tensors_with_dynamic_order(); - multiply_tensors_with_static_order(); + try { + multiply_tensors_with_dynamic_order(); + multiply_tensors_with_static_order(); + } catch (const std::exception& e) { + std::cerr << "Cought exception " << e.what(); + std::cerr << "in the main function of multiply-tensor-product-function." 
<< std::endl; + } } diff --git a/examples/tensor/simple_expressions.cpp b/examples/tensor/simple_expressions.cpp index 229bfbf5f..81c6e1cf8 100644 --- a/examples/tensor/simple_expressions.cpp +++ b/examples/tensor/simple_expressions.cpp @@ -18,19 +18,23 @@ int main() { - using namespace boost::numeric::ublas; + namespace ublas = boost::numeric::ublas; + using value = float; + using tensor = ublas::tensor_dynamic; + using matrix = ublas::matrix; + using vector = ublas::vector; + using shape = tensor::extents_type; - using tensorf = dynamic_tensor; - using matrixf = matrix; - using vectorf = vector; + try { - auto A = tensorf{3,4,2}; + + auto A = tensor{3,4,2}; auto B = A = 2; // Calling overloaded operators // and using simple tensor expression templates. if( A != (B+1) ){ - A += 2*B - 1; + A += 2*B - 1; } // formatted output @@ -38,14 +42,14 @@ int main() std::cout << "% --------------------------- " << std::endl << std::endl; std::cout << "A=" << A << ";" << std::endl << std::endl; - auto n = extents<>{3,4}; - auto D = matrixf(n[0],n[1],1); - auto e = vectorf(n[1],1); - auto f = vectorf(n[0],2); + auto n = shape{3,4}; + auto D = matrix(n[0],n[1],1); + auto e = vector(n[1],1); + auto f = vector(n[0],2); // Calling constructor with // vector expression templates - tensorf C = 2*f; + tensor C = 2*f; // formatted output std::cout << "% --------------------------- " << std::endl; std::cout << "% --------------------------- " << std::endl << std::endl; @@ -54,12 +58,16 @@ int main() // Calling overloaded operators // and mixing simple tensor and matrix expression templates - tensorf F = 3*C + 4*prod(2*D,e); + tensor F = 3*C + 4*prod(2*D,e); // formatted output std::cout << "% --------------------------- " << std::endl; std::cout << "% --------------------------- " << std::endl << std::endl; std::cout << "F=" << F << ";" << std::endl << std::endl; + } catch (const std::exception& e) { + std::cerr << "Caught exception " << e.what(); + std::cerr << " in the main function of simple expression." << std::endl; + } } diff --git a/include/boost/numeric/ublas/tensor.hpp b/include/boost/numeric/ublas/tensor.hpp index b1fa1f428..e076aebac 100644 --- a/include/boost/numeric/ublas/tensor.hpp +++ b/include/boost/numeric/ublas/tensor.hpp @@ -1,5 +1,4 @@ -// Copyright (c) 2018-2019 -// Cem Bassoy +// Copyright (c) 2018 Cem Bassoy // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at diff --git a/include/boost/numeric/ublas/tensor/algorithms.hpp b/include/boost/numeric/ublas/tensor/algorithms.hpp index 8ca98ba61..66375e1c0 100644 --- a/include/boost/numeric/ublas/tensor/algorithms.hpp +++ b/include/boost/numeric/ublas/tensor/algorithms.hpp @@ -1,5 +1,5 @@ // -// Copyright (c) 2018-2019, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com // // Distributed under the Boost Software License, Version 1.0.
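Note: a minimal, self-contained sketch of the mode-q tensor-times-matrix contraction used by the examples above. It assumes that ublas::tensor_dynamic<float> is the dynamic tensor type the examples abbreviate as tensor/tensor3, and that prod with an explicit mode replaces extent n[q-1] of the tensor by the number of matrix rows; shapes and fill values are illustrative only.

    #include <boost/numeric/ublas/tensor.hpp>
    #include <boost/numeric/ublas/matrix.hpp>

    namespace ublas = boost::numeric::ublas;

    int main()
    {
      using tensor = ublas::tensor_dynamic<float>;  // assumed alias for the dynamic tensor
      using matrix = ublas::matrix<float>;

      auto A = tensor{3,4,2};        // extents (3,4,2)
      A = 2.0f;                      // fill with a scalar, as in simple_expressions.cpp
      auto M = matrix(5, 4, 1.0f);   // 5 x n[1] matrix of ones

      // mode-2 contraction: C(i,l,k) = A(i,j,k) * M(l,j), so C has extents (3,5,2)
      tensor C = ublas::prod(A, M, 2u);
    }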
(See // accompanying file LICENSE_1_0.txt or copy at @@ -10,17 +10,14 @@ // -#ifndef _BOOST_UBLAS_TENSOR_ALGORITHMS_HPP -#define _BOOST_UBLAS_TENSOR_ALGORITHMS_HPP +#ifndef BOOST_UBLAS_TENSOR_ALGORITHMS_HPP +#define BOOST_UBLAS_TENSOR_ALGORITHMS_HPP - -#include #include #include +#include -namespace boost { -namespace numeric { -namespace ublas { +namespace boost::numeric::ublas { @@ -38,37 +35,45 @@ namespace ublas { */ template constexpr void copy(const SizeType p, SizeType const*const n, - PointerOut c, SizeType const*const wc, - PointerIn a, SizeType const*const wa) + PointerOut c, SizeType const*const wc, + PointerIn a, SizeType const*const wa) { - static_assert( std::is_pointer::value & std::is_pointer::value, - "Static error in boost::numeric::ublas::copy: Argument types for pointers are not pointer types."); - if( p == 0 ) - return; - - if(c == nullptr || a == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::copy: Pointers shall not be null pointers."); - - if(wc == nullptr || wa == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::copy: Pointers shall not be null pointers."); - - if(n == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::copy: Pointers shall not be null pointers."); - - - std::function lambda; - - lambda = [&lambda, n, wc, wa](SizeType r, PointerOut c, PointerIn a) - { - if(r > 0) - for(auto d = 0u; d < n[r]; c += wc[r], a += wa[r], ++d) - lambda(r-1, c, a ); - else - for(auto d = 0u; d < n[0]; c += wc[0], a += wa[0], ++d) - *c = *a; - }; - - lambda( p-1, c, a ); + static_assert( std::is_pointer::value & std::is_pointer::value, + "Static error in boost::numeric::ublas::copy: Argument types for pointers are not pointer types."); + if( p == 0 ){ + return; + } + + if(c == nullptr || a == nullptr){ + throw std::runtime_error("Error in boost::numeric::ublas::copy: Pointers shall not be null pointers."); + } + + if(wc == nullptr || wa == nullptr){ + throw std::runtime_error("Error in boost::numeric::ublas::copy: Pointers shall not be null pointers."); + } + + if(n == nullptr){ + throw std::runtime_error("Error in boost::numeric::ublas::copy: Pointers shall not be null pointers."); + } + + + std::function lambda; + + lambda = [&lambda, n, wc, wa](SizeType r, PointerOut c, PointerIn a) + { + if(r > 0){ + for(auto d = 0u; d < n[r]; c += wc[r], a += wa[r], ++d){ + lambda(r-1, c, a ); + } + } + else{ + for(auto d = 0u; d < n[0]; c += wc[0], a += wa[0], ++d){ + *c = *a; + } + } + }; + + lambda( p-1, c, a ); } @@ -87,40 +92,40 @@ constexpr void copy(const SizeType p, SizeType const*const n, * @param[in] op unary operation */ template -constexpr void transform(const SizeType p, - SizeType const*const n, - PointerOut c, SizeType const*const wc, - PointerIn a, SizeType const*const wa, - UnaryOp op) +constexpr void transform(SizeType const p, + SizeType const*const n, + PointerOut c, SizeType const*const wc, + PointerIn a, SizeType const*const wa, + UnaryOp op) { - static_assert( std::is_pointer::value & std::is_pointer::value, - "Static error in boost::numeric::ublas::transform: Argument types for pointers are not pointer types."); - if( p == 0 ) - return; + static_assert( std::is_pointer::value & std::is_pointer::value, + "Static error in boost::numeric::ublas::transform: Argument types for pointers are not pointer types."); + if( p == 0 ) + return; - if(c == nullptr || a == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::transform: Pointers shall not be null pointers."); + if(c == nullptr || a == 
nullptr) + throw std::runtime_error("Error in boost::numeric::ublas::transform: Pointers shall not be null pointers."); - if(wc == nullptr || wa == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::transform: Pointers shall not be null pointers."); + if(wc == nullptr || wa == nullptr) + throw std::runtime_error("Error in boost::numeric::ublas::transform: Pointers shall not be null pointers."); - if(n == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::transform: Pointers shall not be null pointers."); + if(n == nullptr) + throw std::runtime_error("Error in boost::numeric::ublas::transform: Pointers shall not be null pointers."); - std::function lambda; + std::function lambda; - lambda = [&lambda, n, wc, wa, op](SizeType r, PointerOut c, PointerIn a) - { - if(r > 0) - for(auto d = 0u; d < n[r]; c += wc[r], a += wa[r], ++d) - lambda(r-1, c, a); - else - for(auto d = 0u; d < n[0]; c += wc[0], a += wa[0], ++d) - *c = op(*a); - }; + lambda = [&lambda, n, wc, wa, op](SizeType r, PointerOut c, PointerIn a) + { + if(r > 0) + for(auto d = 0u; d < n[r]; c += wc[r], a += wa[r], ++d) + lambda(r-1, c, a); + else + for(auto d = 0u; d < n[0]; c += wc[0], a += wa[0], ++d) + *c = op(*a); + }; - lambda( p-1, c, a ); + lambda( p-1, c, a ); } @@ -138,39 +143,39 @@ constexpr void transform(const SizeType p, template [[nodiscard]] constexpr ValueType accumulate(SizeType const p, SizeType const*const n, - PointerIn a, SizeType const*const w, - ValueType k) + PointerIn a, SizeType const*const w, + ValueType k) { - static_assert(std::is_pointer::value, - "Static error in boost::numeric::ublas::transform: Argument types for pointers are not pointer types."); + static_assert(std::is_pointer::value, + "Static error in boost::numeric::ublas::transform: Argument types for pointers are not pointer types."); - if( p == 0 ) - return k; + if( p == 0 ) + return k; - if(a == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::transform: Pointers shall not be null pointers."); + if(a == nullptr) + throw std::runtime_error("Error in boost::numeric::ublas::transform: Pointers shall not be null pointers."); - if(w == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::transform: Pointers shall not be null pointers."); + if(w == nullptr) + throw std::runtime_error("Error in boost::numeric::ublas::transform: Pointers shall not be null pointers."); - if(n == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::transform: Pointers shall not be null pointers."); + if(n == nullptr) + throw std::runtime_error("Error in boost::numeric::ublas::transform: Pointers shall not be null pointers."); - std::function lambda; + std::function lambda; - lambda = [&lambda, n, w](SizeType r, PointerIn a, ValueType k) - { - if(r > 0u) - for(auto d = 0u; d < n[r]; a += w[r], ++d) - k = lambda(r-1, a, k); - else - for(auto d = 0u; d < n[0]; a += w[0], ++d) - k += *a; - return k; - }; + lambda = [&lambda, n, w](SizeType r, PointerIn a, ValueType k) + { + if(r > 0u) + for(auto d = 0u; d < n[r]; a += w[r], ++d) + k = lambda(r-1, a, k); + else + for(auto d = 0u; d < n[0]; a += w[0], ++d) + k += *a; + return k; + }; - return lambda( p-1, a, k ); + return lambda( p-1, a, k ); } /** @brief Performs a reduce operation with all elements of the tensor and an initial value @@ -188,40 +193,40 @@ constexpr ValueType accumulate(SizeType const p, SizeType const*const n, template [[nodiscard]] constexpr ValueType accumulate(SizeType const p, SizeType const*const n, - PointerIn a, 
SizeType const*const w, - ValueType k, BinaryOp op) + PointerIn a, SizeType const*const w, + ValueType k, BinaryOp op) { - static_assert(std::is_pointer::value, - "Static error in boost::numeric::ublas::transform: Argument types for pointers are not pointer types."); + static_assert(std::is_pointer::value, + "Static error in boost::numeric::ublas::transform: Argument types for pointers are not pointer types."); - if( p == 0 ) - return k; + if( p == 0 ) + return k; - if(a == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::transform: Pointers shall not be null pointers."); + if(a == nullptr) + throw std::runtime_error("Error in boost::numeric::ublas::transform: Pointers shall not be null pointers."); - if(w == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::transform: Pointers shall not be null pointers."); + if(w == nullptr) + throw std::runtime_error("Error in boost::numeric::ublas::transform: Pointers shall not be null pointers."); - if(n == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::transform: Pointers shall not be null pointers."); + if(n == nullptr) + throw std::runtime_error("Error in boost::numeric::ublas::transform: Pointers shall not be null pointers."); - std::function lambda; + std::function lambda; - lambda = [&lambda, n, w, op](SizeType r, PointerIn a, ValueType k) - { - if(r > 0u) - for(auto d = 0u; d < n[r]; a += w[r], ++d) - k = lambda(r-1, a, k); - else - for(auto d = 0u; d < n[0]; a += w[0], ++d) - k = op ( k, *a ); - return k; - }; + lambda = [&lambda, n, w, op](SizeType r, PointerIn a, ValueType k) + { + if(r > 0u) + for(auto d = 0u; d < n[r]; a += w[r], ++d) + k = lambda(r-1, a, k); + else + for(auto d = 0u; d < n[0]; a += w[0], ++d) + k = op ( k, *a ); + return k; + }; - return lambda( p-1, a, k ); + return lambda( p-1, a, k ); } /** @brief Transposes a tensor @@ -241,45 +246,45 @@ constexpr ValueType accumulate(SizeType const p, SizeType const*const n, template constexpr void trans( SizeType const p, SizeType const*const na, SizeType const*const pi, - PointerOut c, SizeType const*const wc, - PointerIn a, SizeType const*const wa) + PointerOut c, SizeType const*const wc, + PointerIn a, SizeType const*const wa) { - static_assert( std::is_pointer::value & std::is_pointer::value, - "Static error in boost::numeric::ublas::trans: Argument types for pointers are not pointer types."); + static_assert( std::is_pointer::value & std::is_pointer::value, + "Static error in boost::numeric::ublas::trans: Argument types for pointers are not pointer types."); - if( p < 2) - return; + if( p < 2) + return; - if(c == nullptr || a == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::trans: Pointers shall not be null pointers."); + if(c == nullptr || a == nullptr) + throw std::runtime_error("Error in boost::numeric::ublas::trans: Pointers shall not be null pointers."); - if(na == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::trans: Pointers shall not be null."); + if(na == nullptr) + throw std::runtime_error("Error in boost::numeric::ublas::trans: Pointers shall not be null."); - if(wc == nullptr || wa == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::trans: Pointers shall not be null pointers."); + if(wc == nullptr || wa == nullptr) + throw std::runtime_error("Error in boost::numeric::ublas::trans: Pointers shall not be null pointers."); - if(na == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::trans: Pointers shall not be null 
pointers."); + if(na == nullptr) + throw std::runtime_error("Error in boost::numeric::ublas::trans: Pointers shall not be null pointers."); - if(pi == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::trans: Pointers shall not be null pointers."); + if(pi == nullptr) + throw std::runtime_error("Error in boost::numeric::ublas::trans: Pointers shall not be null pointers."); - std::function lambda; + std::function lambda; - lambda = [&lambda, na, wc, wa, pi](SizeType r, PointerOut c, PointerIn a) - { - if(r > 0) - for(auto d = 0u; d < na[r]; c += wc[pi[r]-1], a += wa[r], ++d) - lambda(r-1, c, a); - else - for(auto d = 0u; d < na[0]; c += wc[pi[0]-1], a += wa[0], ++d) - *c = *a; - }; + lambda = [&lambda, na, wc, wa, pi](SizeType r, PointerOut c, PointerIn a) + { + if(r > 0) + for(auto d = 0u; d < na[r]; c += wc[pi[r]-1], a += wa[r], ++d) + lambda(r-1, c, a); + else + for(auto d = 0u; d < na[0]; c += wc[pi[0]-1], a += wa[0], ++d) + *c = *a; + }; - lambda( p-1, c, a ); + lambda( p-1, c, a ); } @@ -299,49 +304,47 @@ constexpr void trans( SizeType const p, SizeType const*const na, SizeType const */ template -constexpr void trans( SizeType const p, - SizeType const*const na, - SizeType const*const pi, - std::complex* c, SizeType const*const wc, - std::complex* a, SizeType const*const wa) +constexpr void trans(SizeType const p, + SizeType const*const na, + SizeType const*const pi, + std::complex* c, SizeType const*const wc, + std::complex* a, SizeType const*const wa) { - if( p < 2) - return; - - if(c == nullptr || a == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::trans: Pointers shall not be null pointers."); - - if(wc == nullptr || wa == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::trans: Pointers shall not be null pointers."); - - if(na == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::trans: Pointers shall not be null pointers."); - - if(pi == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::trans: Pointers shall not be null pointers."); - - - std::function* c, std::complex* a)> lambda; - - lambda = [&lambda, na, wc, wa, pi](SizeType r, std::complex* c, std::complex* a) - { - if(r > 0) - for(auto d = 0u; d < na[r]; c += wc[pi[r]-1], a += wa[r], ++d) - lambda(r-1, c, a); - else - for(auto d = 0u; d < na[0]; c += wc[pi[0]-1], a += wa[0], ++d) - *c = std::conj(*a); - }; - - lambda( p-1, c, a ); + if( p < 2){ + return; + } + if(c == nullptr || a == nullptr){ + throw std::runtime_error("Error in boost::numeric::ublas::trans: Pointers shall not be null pointers."); + } + if(wc == nullptr || wa == nullptr){ + throw std::runtime_error("Error in boost::numeric::ublas::trans: Pointers shall not be null pointers."); + } + if(na == nullptr){ + throw std::runtime_error("Error in boost::numeric::ublas::trans: Pointers shall not be null pointers."); + } + if(pi == nullptr){ + throw std::runtime_error("Error in boost::numeric::ublas::trans: Pointers shall not be null pointers."); + } + + + std::function* c, std::complex* a)> lambda; + + lambda = [&lambda, na, wc, wa, pi](SizeType r, std::complex* c, std::complex* a) + { + if(r > 0) + for(auto d = 0u; d < na[r]; c += wc[pi[r]-1], a += wa[r], ++d) + lambda(r-1, c, a); + else + for(auto d = 0u; d < na[0]; c += wc[pi[0]-1], a += wa[0], ++d) + *c = std::conj(*a); + }; + + lambda( p-1, c, a ); } +} // namespace boost::numeric::ublas -} -} -} - #endif diff --git a/include/boost/numeric/ublas/tensor/concepts.hpp b/include/boost/numeric/ublas/tensor/concepts.hpp 
new file mode 100644 index 000000000..70820484a --- /dev/null +++ b/include/boost/numeric/ublas/tensor/concepts.hpp @@ -0,0 +1,34 @@ +// +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2021, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + +#ifndef BOOST_UBLAS_TENSOR_CONCEPTS_HPP +#define BOOST_UBLAS_TENSOR_CONCEPTS_HPP + +#include + +namespace boost::numeric::ublas{ + +template +concept integral = std::is_integral_v; + +template +concept signed_integral = integral && std::is_signed_v; + +template +concept unsigned_integral = integral && !signed_integral; + +template +concept floating_point = std::is_floating_point_v; + +} // namespace boost::numeric::ublas + +#endif // BOOST_UBLAS_TENSOR_CONCEPTS_BASIC_HPP diff --git a/include/boost/numeric/ublas/tensor/dynamic_extents.hpp b/include/boost/numeric/ublas/tensor/dynamic_extents.hpp deleted file mode 100644 index b8d2bdeb5..000000000 --- a/include/boost/numeric/ublas/tensor/dynamic_extents.hpp +++ /dev/null @@ -1,241 +0,0 @@ -// -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// - - -#ifndef _BOOST_NUMERIC_UBLAS_TENSOR_DYNAMIC_EXTENTS_HPP_ -#define _BOOST_NUMERIC_UBLAS_TENSOR_DYNAMIC_EXTENTS_HPP_ - -#include -#include -#include -#include -#include -#include - -namespace boost { -namespace numeric { -namespace ublas { - -/** @brief Template class for storing tensor extents with runtime variable size. - * - * Proxy template class of std::vector. 
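Note: the concepts defined in the new concepts.hpp can be used directly as constraints (C++20). A small sketch with a hypothetical helper function, not part of the library:

    #include <boost/numeric/ublas/tensor/concepts.hpp>

    namespace ublas = boost::numeric::ublas;

    // hypothetical helper: accepts only unsigned integral extent values
    template<ublas::unsigned_integral U>
    constexpr U double_extent(U e) { return 2u * e; }

    static_assert(double_extent(3u) == 6u);
    // double_extent(-3) would not compile, since int does not satisfy unsigned_integral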
- * - */ -template -class basic_extents -{ - static_assert( std::numeric_limits::value_type>::is_integer, "Static error in basic_layout: type must be of type integer."); - static_assert(!std::numeric_limits::value_type>::is_signed, "Static error in basic_layout: type must be of type unsigned integer."); - -public: - using base_type = std::vector; - using value_type = typename base_type::value_type; - using const_reference = typename base_type::const_reference; - using reference = typename base_type::reference; - using size_type = typename base_type::size_type; - using const_pointer = typename base_type::const_pointer; - using const_iterator = typename base_type::const_iterator; - using const_reverse_iterator = typename base_type::const_reverse_iterator; - - - /** @brief Default constructs basic_extents - * - * @code auto ex = basic_extents{}; - */ - constexpr basic_extents() = default; - - /** @brief Copy constructs basic_extents from a one-dimensional container - * - * @code auto ex = basic_extents( std::vector(3u,3u) ); - * - * @note checks if size > 1 and all elements > 0 - * - * @param b one-dimensional container of type std::vector - */ - explicit basic_extents(base_type b) - : _base(std::move(b)) - { - if (!is_valid(*this)){ - throw std::length_error("Error in basic_extents::basic_extents() : shape tuple is not a valid permutation: has zero elements."); - } - } - - /** @brief Constructs basic_extents from an initializer list - * - * @code auto ex = basic_extents{3,2,4}; - * - * @note checks if size > 1 and all elements > 0 - * - * @param l one-dimensional list of type std::initializer - */ - basic_extents(std::initializer_list l) - : basic_extents( base_type(std::move(l)) ) - { - } - - /** @brief Constructs basic_extents from a range specified by two iterators - * - * @code auto ex = basic_extents(a.begin(), a.end()); - * - * @note checks if size > 1 and all elements > 0 - * - * @param first iterator pointing to the first element - * @param last iterator pointing to the next position after the last element - */ - constexpr basic_extents(const_iterator first, const_iterator last) - : basic_extents ( base_type( first,last ) ) - { - } - - /** @brief Copy constructs basic_extents */ - constexpr basic_extents(basic_extents const& l ) - : _base(l._base) - { - } - - /** @brief Move constructs basic_extents */ - constexpr basic_extents(basic_extents && l ) noexcept - : _base(std::move(l._base)) - { - } - - - template - constexpr basic_extents(OtherExtents const& e) - : _base(e.size()) - { - static_assert( is_extents_v, "boost::numeric::ublas::basic_extents(OtherExtents const&) : " - "OtherExtents should be a valid tensor extents" - ); - std::copy(e.begin(),e.end(), _base.begin()); - } - - ~basic_extents() = default; - - constexpr basic_extents& operator=(basic_extents && other) - noexcept(std::is_nothrow_swappable_v) - { - swap (*this, other); - return *this; - } - constexpr basic_extents& operator=(basic_extents const& other) - noexcept(std::is_nothrow_swappable_v) - { - basic_extents temp(other); - swap (*this, temp); - return *this; - } - - friend void swap(basic_extents& lhs, basic_extents& rhs) - noexcept(std::is_nothrow_swappable_v) - { - std::swap(lhs._base , rhs._base ); - } - - [[nodiscard]] inline - constexpr const_pointer data() const noexcept - { - return this->_base.data(); - } - - [[nodiscard]] inline - constexpr const_reference operator[] (size_type p) const - { - return this->_base[p]; - } - - [[nodiscard]] inline - constexpr const_reference at (size_type p) const - { - 
return this->_base.at(p); - } - - [[nodiscard]] inline - constexpr reference operator[] (size_type p) - { - return this->_base[p]; - } - - [[nodiscard]] inline - constexpr reference at (size_type p) - { - return this->_base.at(p); - } - - [[nodiscard]] inline - constexpr const_reference back () const - { - return this->_base.back(); - } - - - [[nodiscard]] inline - constexpr bool empty() const noexcept - { - return this->_base.empty(); - } - - [[nodiscard]] inline - constexpr size_type size() const noexcept - { - return this->_base.size(); - } - - inline - constexpr void clear() noexcept - { - this->_base.clear(); - } - - [[nodiscard]] inline - constexpr const_iterator - begin() const noexcept - { - return _base.begin(); - } - - [[nodiscard]] inline - constexpr const_iterator - end() const noexcept - { - return _base.end(); - } - - [[nodiscard]] inline - constexpr const_reverse_iterator - rbegin() const noexcept - { - return _base.rbegin(); - } - - [[nodiscard]] inline - constexpr const_reverse_iterator - rend() const noexcept - { - return _base.rend(); - } - - [[nodiscard]] inline - constexpr base_type const& base() const noexcept { return _base; } - -private: - - base_type _base{}; - -}; - -} // namespace ublas -} // namespace numeric -} // namespace boost - - -#endif diff --git a/include/boost/numeric/ublas/tensor/dynamic_strides.hpp b/include/boost/numeric/ublas/tensor/dynamic_strides.hpp deleted file mode 100644 index 3f26eb345..000000000 --- a/include/boost/numeric/ublas/tensor/dynamic_strides.hpp +++ /dev/null @@ -1,219 +0,0 @@ -// -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// -/// \file strides.hpp Definition for the basic_strides template class - - -#ifndef _BOOST_UBLAS_TENSOR_DYNAMIC_STRIDES_HPP_ -#define _BOOST_UBLAS_TENSOR_DYNAMIC_STRIDES_HPP_ - -#include -#include -#include -#include - -namespace boost { -namespace numeric { -namespace ublas { - -template -class basic_extents; - - -/** @brief Template class for storing tensor strides for iteration with runtime variable size. - * - * Proxy template class of std::vector. 
- * - */ -template -class basic_strides -{ -public: - - using base_type = std::vector<__int_type>; - - static_assert( std::numeric_limits::is_integer, - "Static error in boost::numeric::ublas::basic_strides: type must be of type integer."); - static_assert(!std::numeric_limits::is_signed, - "Static error in boost::numeric::ublas::basic_strides: type must be of type unsigned integer."); - static_assert(std::is_same<__layout,layout::first_order>::value || std::is_same<__layout,layout::last_order>::value, - "Static error in boost::numeric::ublas::basic_strides: layout type must either first or last order"); - - - using layout_type = __layout; - using value_type = typename base_type::value_type; - using reference = typename base_type::reference; - using const_reference = typename base_type::const_reference; - using size_type = typename base_type::size_type; - using const_pointer = typename base_type::const_pointer; - using const_iterator = typename base_type::const_iterator; - using const_reverse_iterator = typename base_type::const_reverse_iterator; - - - /** @brief Default constructs basic_strides - * - * @code auto ex = basic_strides{}; - */ - constexpr explicit basic_strides() = default; - - /** @brief Constructs basic_strides from basic_extents for the first- and last-order storage formats - * - * @code auto strides = basic_strides( basic_extents{2,3,4} ); - * - */ - template - constexpr basic_strides(ExtentsType const& s) - : _base(s.size(),1) - { - static_assert( is_extents_v, "boost::numeric::ublas::basic_strides(ExtentsType const&) : " - "ExtentsType is not a tensor extents" - ); - if( s.empty() ) - return; - - if( !is_valid(s) ) - throw std::runtime_error("Error in boost::numeric::ublas::basic_strides(ExtentsType const&) : " - "shape is not valid." - ); - - if( is_vector(s) || is_scalar(s) ) - return; - - if( this->size() < 2 ) - throw std::runtime_error("Error in boost::numeric::ublas::basic_strides(ExtentsType const&) : " - "size of strides must be greater or equal to 2." 
- ); - - - if constexpr (std::is_same::value){ - std::transform(s.begin(), s.end() - 1, _base.begin(), _base.begin() + 1, std::multiplies{}); - }else { - std::transform(s.rbegin(), s.rend() - 1, _base.rbegin(), _base.rbegin() + 1, std::multiplies{}); - } - } - - constexpr basic_strides(basic_strides const& l) - : _base(l._base) - {} - - constexpr basic_strides(basic_strides && l ) noexcept - : _base(std::move(l._base)) - {} - - constexpr basic_strides(base_type const& l ) - : _base(l) - {} - - constexpr basic_strides(base_type && l ) noexcept - : _base(std::move(l)) - {} - - ~basic_strides() = default; - - constexpr basic_strides& operator=(basic_strides&& other) - noexcept(std::is_nothrow_swappable_v) - { - swap (*this, other); - return *this; - } - - constexpr basic_strides& operator=(basic_strides const& other) - noexcept(std::is_nothrow_swappable_v) - { - basic_strides temp(other); - swap (*this, temp); - return *this; - } - - friend void swap(basic_strides& lhs, basic_strides& rhs) - noexcept(std::is_nothrow_swappable_v) - { - std::swap(lhs._base , rhs._base); - } - - [[nodiscard]] inline - constexpr const_reference operator[] (size_type p) const{ - return _base[p]; - } - - [[nodiscard]] inline - constexpr const_pointer data() const{ - return _base.data(); - } - - [[nodiscard]] inline - constexpr const_reference at (size_type p) const{ - return _base.at(p); - } - - [[nodiscard]] inline - constexpr const_reference back () const{ - return _base.back(); - } - - [[nodiscard]] inline - constexpr reference back (){ - return _base.back(); - } - - [[nodiscard]] inline - constexpr bool empty() const noexcept{ - return _base.empty(); - } - - [[nodiscard]] inline - constexpr size_type size() const noexcept{ - return _base.size(); - } - - [[nodiscard]] inline - constexpr const_iterator begin() const noexcept{ - return _base.begin(); - } - - [[nodiscard]] inline - constexpr const_iterator end() const noexcept{ - return _base.end(); - } - - inline - constexpr void clear() noexcept{ - this->_base.clear(); - } - - [[nodiscard]] inline - constexpr base_type const& base() const noexcept{ - return this->_base; - } - - [[nodiscard]] inline - constexpr const_reverse_iterator - rbegin() const noexcept - { - return _base.rbegin(); - } - - [[nodiscard]] inline - constexpr const_reverse_iterator - rend() const noexcept - { - return _base.rend(); - } - -private: - base_type _base{}; -}; - -} -} -} - -#endif diff --git a/include/boost/numeric/ublas/tensor/expression.hpp b/include/boost/numeric/ublas/tensor/expression.hpp index 9b3c44d2c..47d534010 100644 --- a/include/boost/numeric/ublas/tensor/expression.hpp +++ b/include/boost/numeric/ublas/tensor/expression.hpp @@ -14,13 +14,11 @@ #include #include -#include -#include -namespace boost { -namespace numeric { -namespace ublas { -namespace detail { +#include "tags.hpp" + +namespace boost::numeric::ublas::detail +{ /** @\brief base class for tensor expressions * @@ -67,16 +65,16 @@ struct binary_tensor_expression using size_type = typename tensor_type::size_type; - explicit constexpr binary_tensor_expression(expression_type_left const& l, expression_type_right const& r, binary_operation o) - : el(l) , er(r) , op(o) {} + explicit constexpr binary_tensor_expression(expression_type_left const& l, expression_type_right const& r, binary_operation o) : el(l) , er(r) , op(std::move(o)) {} + constexpr binary_tensor_expression(binary_tensor_expression&& l) noexcept = delete; + constexpr binary_tensor_expression& operator=(binary_tensor_expression&& l) noexcept = delete; 
+ ~binary_tensor_expression() = default; + binary_tensor_expression() = delete; binary_tensor_expression(const binary_tensor_expression& l) = delete; - constexpr binary_tensor_expression(binary_tensor_expression&& l) noexcept - : el(l.el), er(l.er), op(std::move(l.op)) {} - constexpr binary_tensor_expression& operator=(binary_tensor_expression&& l) noexcept = default; binary_tensor_expression& operator=(binary_tensor_expression const& l) noexcept = delete; - ~binary_tensor_expression() = default; + [[nodiscard]] inline constexpr decltype(auto) operator()(size_type i) const { return op(el(i), er(i)); } @@ -135,24 +133,22 @@ struct unary_tensor_expression using self_type = unary_tensor_expression; using tensor_type = T; using expression_type = E; - + using unary_operation = OP; using derived_type = tensor_expression >; using size_type = typename tensor_type::size_type; - explicit constexpr unary_tensor_expression(E const& ee, OP o) : e(ee) , op(o) {} - constexpr unary_tensor_expression() = delete; - unary_tensor_expression(const unary_tensor_expression& l) = delete; - constexpr unary_tensor_expression(unary_tensor_expression&& l) noexcept - : e(l.e), op(std::move(l.op)) {} + explicit constexpr unary_tensor_expression(expression_type const& ee, unary_operation o) : e(ee) , op(std::move(o)) {} + constexpr unary_tensor_expression(unary_tensor_expression&& l) noexcept = delete; + constexpr unary_tensor_expression& operator=(unary_tensor_expression&& l) noexcept = delete; - constexpr unary_tensor_expression& operator=(unary_tensor_expression&& l) noexcept = default; - + constexpr unary_tensor_expression() = delete; + unary_tensor_expression(unary_tensor_expression const& l) = delete; unary_tensor_expression& operator=(unary_tensor_expression const& l) noexcept = delete; ~unary_tensor_expression() = default; - [[nodiscard]] inline - constexpr decltype(auto) operator()(size_type i) const { return op(e(i)); } + [[nodiscard]] inline constexpr + decltype(auto) operator()(size_type i) const { return op(e(i)); } E const& e; OP op; @@ -181,8 +177,6 @@ constexpr auto make_unary_tensor_expression( vector_expression const& e, OP o } -} -} -} -} -#endif +} // namespace boost::numeric::ublas::detail + +#endif // BOOST_UBLAS_TENSOR_EXPRESSIONS_HPP diff --git a/include/boost/numeric/ublas/tensor/expression_evaluation.hpp b/include/boost/numeric/ublas/tensor/expression_evaluation.hpp index a169b4e62..d29b6eabe 100644 --- a/include/boost/numeric/ublas/tensor/expression_evaluation.hpp +++ b/include/boost/numeric/ublas/tensor/expression_evaluation.hpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at @@ -13,20 +13,22 @@ #ifndef BOOST_UBLAS_TENSOR_EXPRESSIONS_EVALUATION_HPP #define BOOST_UBLAS_TENSOR_EXPRESSIONS_EVALUATION_HPP -#include #include -#include +#include + + + +#include "extents.hpp" +#include "extents/extents_functions.hpp" +#include "type_traits.hpp" namespace boost::numeric::ublas { -template +template class tensor_core; -template -class basic_extents; - -} +} // namespace boost::numeric::ublas namespace boost::numeric::ublas::detail { @@ -39,7 +41,7 @@ struct binary_tensor_expression; template struct unary_tensor_expression; -} +} // namespace boost::numeric::ublas::detail namespace boost::numeric::ublas::detail { @@ -66,11 +68,8 @@ struct has_tensor_types> } // namespace boost::numeric::ublas::detail -namespace boost::numeric::ublas::detail { - - - - +namespace boost::numeric::ublas::detail +{ /** @brief Retrieves extents of the tensor_core * @@ -166,91 +165,83 @@ constexpr auto& retrieve_extents(unary_tensor_expression const& expr) namespace boost::numeric::ublas::detail { -template +template [[nodiscard]] inline -constexpr auto all_extents_equal(tensor_core const& t, Extents const& extents) + constexpr auto all_extents_equal(tensor_core const& t, extents const& e) { - static_assert(is_extents_v, - "Error in boost::numeric::ublas::detail::all_extents_equal: extents passed should be of extents type." - ); - - return extents == t.extents(); + return ::operator==(e,t.extents()); } -template +template [[nodiscard]] -constexpr auto all_extents_equal(tensor_expression const& expr, Extents const& extents) +constexpr auto all_extents_equal(tensor_expression const& expr, extents const& e) { - static_assert(is_extents_v, - "Error in boost::numeric::ublas::detail::all_extents_equal: extents passed should be of extents type." - ); static_assert(detail::has_tensor_types>::value, "Error in boost::numeric::ublas::detail::all_extents_equal: Expression to evaluate should contain tensors."); auto const& cast_expr = static_cast(expr); + using ::operator==; + using ::operator!=; if constexpr ( std::is_same::value ) - if( extents != cast_expr.extents() ) + if( e != cast_expr.extents() ) return false; if constexpr ( detail::has_tensor_types::value ) - if ( !all_extents_equal(cast_expr, extents)) + if ( !all_extents_equal(cast_expr, e)) return false; return true; } -template +template [[nodiscard]] -constexpr auto all_extents_equal(binary_tensor_expression const& expr, Extents const& extents) +constexpr auto all_extents_equal(binary_tensor_expression const& expr, extents const& e) { - static_assert(is_extents_v, - "Error in boost::numeric::ublas::detail::all_extents_equal: extents passed should be of extents type." 
- ); - static_assert(detail::has_tensor_types>::value, "Error in boost::numeric::ublas::detail::all_extents_equal: Expression to evaluate should contain tensors."); + using ::operator==; + using ::operator!=; + if constexpr ( std::is_same::value ) - if(extents != expr.el.extents()) + if(e != expr.el.extents()) return false; if constexpr ( std::is_same::value ) - if(extents != expr.er.extents()) + if(e != expr.er.extents()) return false; if constexpr ( detail::has_tensor_types::value ) - if(!all_extents_equal(expr.el, extents)) + if(!all_extents_equal(expr.el, e)) return false; if constexpr ( detail::has_tensor_types::value ) - if(!all_extents_equal(expr.er, extents)) + if(!all_extents_equal(expr.er, e)) return false; return true; } -template +template [[nodiscard]] -constexpr auto all_extents_equal(unary_tensor_expression const& expr, Extents const& extents) +constexpr auto all_extents_equal(unary_tensor_expression const& expr, extents const& e) { - static_assert(is_extents_v, - "Error in boost::numeric::ublas::detail::all_extents_equal: extents passed should be of extents type." - ); - static_assert(detail::has_tensor_types>::value, "Error in boost::numeric::ublas::detail::all_extents_equal: Expression to evaluate should contain tensors."); + using ::operator==; + if constexpr ( std::is_same::value ) - if(extents != expr.e.extents()) + if(e != expr.e.extents()) return false; if constexpr ( detail::has_tensor_types::value ) - if(!all_extents_equal(expr.e, extents)) + if(!all_extents_equal(expr.e, e)) return false; return true; @@ -259,7 +250,8 @@ constexpr auto all_extents_equal(unary_tensor_expression const& expr, Ex } // namespace boost::numeric::ublas::detail -namespace boost::numeric::ublas::detail { +namespace boost::numeric::ublas::detail +{ /** @brief Evaluates expression for a tensor_core @@ -290,10 +282,10 @@ template const& expr) { - static_assert(is_valid_tensor_v && is_valid_tensor_v, - "boost::numeric::ublas::detail::eval(tensor_type&, tensor_expression const&) : " - "tensor_type and tensor_expresssion should be a valid tensor type" - ); +// static_assert(is_valid_tensor_v && is_valid_tensor_v, +// "boost::numeric::ublas::detail::eval(tensor_type&, tensor_expression const&) : " +// "tensor_type and tensor_expresssion should be a valid tensor type" +// ); static_assert(std::is_same_v, "boost::numeric::ublas::detail::eval(tensor_type&, tensor_expression const&) : " @@ -347,5 +339,5 @@ inline void eval(tensor_type& lhs, unary_fn const& fn) } -} +} // namespace boost::numeric::ublas::detail #endif diff --git a/include/boost/numeric/ublas/tensor/extents.hpp b/include/boost/numeric/ublas/tensor/extents.hpp new file mode 100644 index 000000000..74034264e --- /dev/null +++ b/include/boost/numeric/ublas/tensor/extents.hpp @@ -0,0 +1,53 @@ +// +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef BOOST_UBLAS_TENSOR_EXTENTS_HPP +#define BOOST_UBLAS_TENSOR_EXTENTS_HPP + +#include "extents/extents_base.hpp" +#include "extents/extents_dynamic_size.hpp" +#include "extents/extents_static_size.hpp" +#include "extents/extents_static.hpp" +#include "extents/extents_functions.hpp" +#include "extents/extents_static_functions.hpp" + + +template +bool operator==( + boost::numeric::ublas::extents const& lhs, + boost::numeric::ublas::extents const& rhs ) +{ + return size(lhs) == size(rhs) && std::equal( begin(lhs), end (lhs), begin(rhs) ); +} + +template +bool operator==( + boost::numeric::ublas::extents const& lhs, + boost::numeric::ublas::extents const& rhs ) +{ + return size(lhs) == size(rhs) && std::equal( begin(lhs), end (lhs), begin(rhs) ); +} + +template +bool operator!=( + boost::numeric::ublas::extents const& lhs, + boost::numeric::ublas::extents const& rhs ) +{ + return !( lhs == rhs) ; +} + +template +bool operator!=( + boost::numeric::ublas::extents const& lhs, + boost::numeric::ublas::extents const& rhs ) +{ + return !( lhs == rhs) ; +} + + +#endif // BOOST_UBLAS_TENSOR_EXTENTS_HPP diff --git a/include/boost/numeric/ublas/tensor/extents/extents_base.hpp b/include/boost/numeric/ublas/tensor/extents/extents_base.hpp new file mode 100644 index 000000000..40fc01846 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/extents/extents_base.hpp @@ -0,0 +1,54 @@ +// +// Copyright (c) 2020, Cem Bassoy, cem.bassoy@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// + + +#ifndef BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_BASE_HPP +#define BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_BASE_HPP + +#include +//#include +#include + +#include "../concepts.hpp" + +namespace boost::numeric::ublas { + + +template +struct extents_base +{ + + using derived_type = D; + inline constexpr decltype(auto) operator()() const { return static_cast(*this); } + inline constexpr decltype(auto) operator()() { return static_cast< derived_type&>(*this); } + +}; + +template +class extents_core; + +template +using extents = extents_core; + +template struct is_extents : std::false_type {}; +template struct is_strides : std::false_type {}; +template struct is_dynamic : std::false_type {}; +template struct is_static : std::false_type {}; +template struct is_dynamic_rank : std::false_type {}; +template struct is_static_rank : std::false_type {}; + +template inline static constexpr bool const is_extents_v = is_extents::value; +template inline static constexpr bool const is_strides_v = is_strides::value; +template inline static constexpr bool const is_dynamic_v = is_dynamic::value; +template inline static constexpr bool const is_static_v = is_static ::value; +template inline static constexpr bool const is_dynamic_rank_v = is_dynamic_rank::value; +template inline static constexpr bool const is_static_rank_v = is_static_rank::value; + +} // namespace boost::numeric::ublas + +#endif // _BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_BASE_HPP_ diff --git a/include/boost/numeric/ublas/tensor/extents/extents_dynamic_size.hpp b/include/boost/numeric/ublas/tensor/extents/extents_dynamic_size.hpp new file mode 100644 index 000000000..fe3266050 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/extents/extents_dynamic_size.hpp @@ -0,0 +1,154 @@ +// +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, 
Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + + +#ifndef BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_DYNAMIC_SIZE_HPP +#define BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_DYNAMIC_SIZE_HPP + +#include +#include +#include +#include +#include + +#include "extents_base.hpp" +#include "extents_functions.hpp" + +#include "../layout.hpp" +#include "../concepts.hpp" + +namespace boost::numeric::ublas { + + + +/** @brief Template class for storing tensor extents with runtime variable size. + * + * Proxy template class of std::vector. + * + */ +template +class extents_core + : public extents_base> +{ + using super_type = extents_base>; + +public: + using base_type = std::vector; + using value_type = typename base_type::value_type; + using const_reference = typename base_type::const_reference; + using reference = typename base_type::reference; + using size_type = typename base_type::size_type; + using const_pointer = typename base_type::const_pointer; + using const_iterator = typename base_type::const_iterator; + using const_reverse_iterator = typename base_type::const_reverse_iterator; + + + extents_core() = default; + + explicit extents_core(base_type b) + : _base(std::move(b)) + { + if (!ublas::is_valid(*this)){ + throw std::invalid_argument("in boost::numeric::ublas::extents<> : " + "could not intanstiate extents<> as provided extents are not valid."); + } + } + + /** @brief Constructs extents from an initializer list + * + * @code auto ex = extents<>{}; @endcode + * @code auto ex = extents<>{3,2,4}; @endcode + * + * @note checks if size > 1 and all elements > 0 + * + * @param l one-dimensional list of type std::initializer + */ + extents_core(std::initializer_list l) + : extents_core( base_type(l) ) + { + if (!ublas::is_valid(*this)){ + throw std::invalid_argument("in boost::numeric::ublas::extents<> : " + "could not intanstiate extents<> as provided extents are not valid."); + } + } + + /** @brief Constructs extents from a range specified by two iterators + * + * @code auto ex = extents<>(a.begin(), a.end()); + * + * @note checks if size > 1 and all elements > 0 + * + * @param first iterator pointing to the first element + * @param last iterator pointing to the next position after the last element + */ + + template + constexpr extents_core(InputIt first, InputIt last) + : extents_core ( base_type( first,last ) ) + { + if (!ublas::is_valid(*this)){ + throw std::invalid_argument("in boost::numeric::ublas::extents<> : " + "could not intanstiate extents<> as provided extents are not valid."); + } + } + + /** @brief Copy constructs extents */ + /*constexpr*/ extents_core(extents_core const& l ) + : _base(l._base) + { + } + + /** @brief Move constructs extents */ + /*constexpr*/ extents_core(extents_core && l ) noexcept + : _base(std::move(l._base)) + { + } + + ~extents_core() = default; + + // NOLINTNEXTLINE(cppcoreguidelines-special-member-functions,hicpp-special-member-functions) + extents_core& operator=(extents_core other) + noexcept(std::is_nothrow_swappable_v) + { + swap (*this, other); + return *this; + } + + + friend void swap(extents_core& lhs, extents_core& rhs) + noexcept(std::is_nothrow_swappable_v) + { + std::swap(lhs._base,rhs._base); + } + + [[nodiscard]] inline /*constexpr*/ const_reference operator[] (size_type p) const { return 
this->_base[p]; } + [[nodiscard]] inline /*constexpr*/ const_reference at (size_type p) const { return this->_base.at(p); } + + [[nodiscard]] inline /*constexpr*/ auto size() const noexcept { return this->_base.size(); } + [[nodiscard]] inline /*constexpr*/ auto const& base() const noexcept { return this->_base; } + [[nodiscard]] inline /*constexpr*/ const_pointer data() const noexcept { return this->_base.data(); } +private: + base_type _base; +}; + +} // namespace boost::numeric::ublas + + +namespace boost::numeric::ublas{ +template struct is_extents < extents_core > : std::true_type {}; +template struct is_dynamic < extents_core > : std::true_type {}; +template struct is_dynamic_rank < extents_core > : std::true_type {}; + +} // namespace boost::numeric::ublas + + +#endif diff --git a/include/boost/numeric/ublas/tensor/extents/extents_functions.hpp b/include/boost/numeric/ublas/tensor/extents/extents_functions.hpp new file mode 100644 index 000000000..85e64ff8f --- /dev/null +++ b/include/boost/numeric/ublas/tensor/extents/extents_functions.hpp @@ -0,0 +1,247 @@ +// +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + +#ifndef BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_FUNCTIONS_HPP +#define BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_FUNCTIONS_HPP + + +#include +#include +//#include + +#include "../layout.hpp" +#include "../concepts.hpp" + +namespace boost::numeric::ublas +{ +template +class extents_core; + + +template +struct extents_base; + +template [[nodiscard]] constexpr inline auto front (extents_base const& e) noexcept -> typename D::const_reference { return e().base().front(); } +template [[nodiscard]] constexpr inline auto back (extents_base const& e) noexcept -> typename D::const_reference { return e().base().back(); } +template [[nodiscard]] constexpr inline auto begin (extents_base const& e) noexcept -> typename D::const_iterator { return e().base().begin(); } +template [[nodiscard]] constexpr inline auto end (extents_base const& e) noexcept -> typename D::const_iterator { return e().base().end(); } +template [[nodiscard]] constexpr inline auto cbegin(extents_base const& e) noexcept -> typename D::const_iterator { return e().base().cbegin(); } +template [[nodiscard]] constexpr inline auto cend (extents_base const& e) noexcept -> typename D::const_iterator { return e().base().cend(); } +template [[nodiscard]] constexpr inline auto rbegin(extents_base const& e) noexcept -> typename D::const_reverse_iterator { return e().base().rbegin(); } +template [[nodiscard]] constexpr inline auto rend (extents_base const& e) noexcept -> typename D::const_reverse_iterator { return e().base().rend(); } +template [[nodiscard]] constexpr inline auto empty (extents_base const& e) noexcept -> bool { return e().base().empty(); } +template [[nodiscard]] constexpr inline auto size (extents_base const& e) noexcept -> typename D::size_type { return e().base().size(); } + +} //namespace boost::numeric::ublas + + +namespace boost::numeric::ublas +{ + +/** @brief Returns true if extents equals ([m,n,...,l]) with m>0,n>0,...,l>0 */ +template +[[nodiscard]] inline constexpr bool is_valid(extents_base const& e) +{ + return std::all_of(begin(e),end(e), [](auto a){ return a>0UL; } ); +} + +/** @brief Returns true if extents equals (m,[n,...,l]) 
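Note: a short sketch of the runtime-sized extents<> defined above together with a few of the free functions that follow, assuming extents<> denotes the dynamic-rank specialization and that an invalid shape throws std::invalid_argument as in the constructors above.

    #include <boost/numeric/ublas/tensor/extents.hpp>
    #include <cassert>

    namespace ublas = boost::numeric::ublas;

    int main()
    {
      auto n = ublas::extents<>{3,4,2};   // rank and values chosen at runtime
      assert(ublas::size(n) == 3u);
      assert(n[1] == 4u);
      assert(ublas::is_valid(n));         // every extent is greater than zero
      static_assert(ublas::is_dynamic_v<ublas::extents<>>);
      // ublas::extents<>{3,0,2} would throw std::invalid_argument
    }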
with m=1,n=1,...,l=1 */ +template +[[nodiscard]] inline constexpr bool is_scalar(extents_base const& e) +{ + return (size(e)>0) && std::all_of (cbegin(e),cend(e),[](auto a){return a==1ul;}); +} + +/** @brief Returns true if extents equals (m,[n,1,...,1]) with m>=1||n>=1 && m==1||n==1*/ +template +[[nodiscard]] inline constexpr bool is_vector(extents_base const& e) +{ + if (empty(e) ) {return false;} + if (size (e) == 1) {return front(e)>=1ul;} + + return + std::any_of(cbegin(e) ,cbegin(e)+2ul, [](auto a){return a>=1ul;}) && + std::any_of(cbegin(e) ,cbegin(e)+2ul, [](auto a){return a==1ul;}) && + std::all_of(cbegin(e)+2ul,cend (e) , [](auto a){return a==1ul;}); + +// std::any_of(cbegin(e) ,cbegin(e)+2, [](auto a){return a>=1UL;}) && // a>1UL +// std::any_of(cbegin(e) ,cbegin(e)+2, [](auto a){return a==1UL;}) && +// std::all_of(cbegin(e)+2,cend(e) , [](auto a){return a==1UL;}); +} + +/** @brief Returns true if (m,[n,1,...,1]) with m>=1 or n>=1 */ +template +[[nodiscard]] inline constexpr bool is_matrix(extents_base const& e) +{ + if (empty(e) ) {return false;} + if (size (e) == 1) {return front(e)>=1ul;} + + return std::any_of (cbegin(e) ,cbegin(e)+2, [](auto a){return a>=1ul;}) && // all_of > 1UL + std::all_of (cbegin(e)+2,cend(e) , [](auto a){return a==1ul;}); +} + +/** @brief Returns true if shape is has a tensor shape + * + * @returns true if is_valid & not (is_scalar&is_vector&is_matrix) + */ +template +[[nodiscard]] inline constexpr bool is_tensor(extents_base const& e) +{ + return size(e) > 2 && + std::all_of (cbegin(e) ,cbegin(e)+2, [](auto a){return a>=1ul;}) && // all_of > 1UL + std::any_of (cbegin(e)+2,cend(e) , [](auto a){return a> 1ul;}); +} + + + + + +/** @brief Computes the number of elements */ +template +[[nodiscard]] inline constexpr auto product( extents_base const& e ) +{ + if( empty(e) ){ + return std::size_t{0UL}; + } + + return std::accumulate(begin(e), end(e), std::size_t{1UL}, std::multiplies<>{}); +} + + +//template // std::inserter(out,out.begin()) +//inline constexpr +// void squeeze( +// extents_base const& in, +// extents_base const& out) +//{ +// if(e().size() < 2){ return; } + +// if(is_vector(e) || is_scalar(e)) { +// std::copy (in, in+2, out ); +// } +// else{ +// std::copy_if(in, in_end, out, [](auto a){return a!=1u;}); +// } +//} + +//template +//[[nodiscard]] inline bool operator==( +// extents_base const& lhs, +// extents_base const& rhs ) +//{ +// return size(lhs) == size(rhs) && std::equal( begin(lhs), end (lhs), begin(rhs) ); +//} + +//template +//[[nodiscard]] inline bool operator!=( +// extents_base const& lhs, +// extents_base const& rhs ) +//{ +// return !( lhs == rhs) ; +//} + +template +[[nodiscard]] inline auto to_strides(extents_core const& e, L /*unused*/) +{ + auto s = typename extents_core::base_type(e.size(),1ul); + + if(empty(e) || is_vector(e) || is_scalar(e)){ + return s; + } + if constexpr(std::is_same_v){ + std::transform(begin (e), end (e) - 1, s.begin (), s.begin ()+1, std::multiplies<>{}); + } else { + std::transform(rbegin(e), rend(e) - 1, s.rbegin(), s.rbegin()+1, std::multiplies<>{}); + } + return s; +} + +template +[[nodiscard]] inline auto to_strides(extents_core const& e, L /*unused*/) +{ + auto s = typename extents_core::base_type{}; + std::fill(s.begin(),s.end(),1ul); + + if(empty(e) || is_vector(e) || is_scalar(e)){ + return s; + } + if constexpr(std::is_same_v){ + std::transform(begin (e), end (e) - 1, s.begin (), s.begin ()+1, std::multiplies<>{}); + } else { + std::transform(rbegin(e), rend(e) - 1, s.rbegin(), 
s.rbegin()+1, std::multiplies<>{}); + } + return s; +} + + + +} // namespace boost::numeric::ublas + + +template +[[nodiscard]] inline constexpr bool operator==( + boost::numeric::ublas::extents_core const& lhs, + boost::numeric::ublas::extents_core const& rhs ) +{ + if constexpr(m != n) + return false; + return std::equal( begin(lhs), end (lhs), begin(rhs) ); +} + +template +[[nodiscard]] inline constexpr bool operator!=( + boost::numeric::ublas::extents_core const& lhs, + boost::numeric::ublas::extents_core const& rhs ) +{ + if constexpr(m == n) + return false; + return !(lhs == rhs) ; +} + +template +[[nodiscard]] inline constexpr bool operator==( + boost::numeric::ublas::extents_base const& lhs, + boost::numeric::ublas::extents_base const& rhs ) +{ + return size(lhs) == size(rhs) && std::equal( begin(lhs), end (lhs), begin(rhs) ); +} + +template +[[nodiscard]] inline constexpr bool operator!=( + boost::numeric::ublas::extents_base const& lhs, + boost::numeric::ublas::extents_base const& rhs ) +{ + return !( lhs == rhs) ; +} + + +namespace std +{ + +template +struct tuple_size< boost::numeric::ublas::extents_core > + : integral_constant::base_type>> +{}; + +template +[[nodiscard]] constexpr inline + auto get(boost::numeric::ublas::extents_core const& e) noexcept +{ + return std::get(e.base()); +} + +} // namespace std + + +#endif // _BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_FUNCTIONS_HPP_ + + diff --git a/include/boost/numeric/ublas/tensor/extents/extents_static.hpp b/include/boost/numeric/ublas/tensor/extents/extents_static.hpp new file mode 100644 index 000000000..3f375b50d --- /dev/null +++ b/include/boost/numeric/ublas/tensor/extents/extents_static.hpp @@ -0,0 +1,78 @@ +// +// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + +#ifndef BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_STATIC_HPP +#define BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_STATIC_HPP + +#include +#include +#include +#include + +#include "extents_functions.hpp" +#include "extents_base.hpp" +#include "../concepts.hpp" + + +namespace boost::numeric::ublas { + + +/** @brief Template class for storing tensor extents for compile time. 
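Note: a sketch of computing strides from a dynamic extents object with the free to_strides function above, once for each layout tag. It assumes layout::first_order and layout::last_order are the layout types used elsewhere in the tensor headers; the expected stride values follow directly from the transforms above.

    #include <boost/numeric/ublas/tensor/extents.hpp>
    #include <boost/numeric/ublas/tensor/layout.hpp>

    namespace ublas = boost::numeric::ublas;

    int main()
    {
      auto n  = ublas::extents<>{4,2,3};
      auto wf = ublas::to_strides(n, ublas::layout::first_order{});  // {1,4,8}, column-major style
      auto wl = ublas::to_strides(n, ublas::layout::last_order{});   // {6,3,1}, row-major style
      (void)wf; (void)wl;
    }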
+ * + * @code extents<1,2,3,4> t @endcode + * + * @tparam e parameter pack of extents + * + */ +template +class extents_core + : public extents_base> +{ + static constexpr auto size = sizeof...(e)+2u; +public: + + using base_type = std::array; + using value_type = typename base_type::value_type; + using size_type = typename base_type::size_type; + using reference = typename base_type::reference; + using const_reference = typename base_type::const_reference; + using const_pointer = typename base_type::const_pointer; + using const_iterator = typename base_type::const_iterator; + using const_reverse_iterator = typename base_type::const_reverse_iterator; + + constexpr extents_core() = default; + constexpr extents_core(extents_core const&) noexcept = default; + constexpr extents_core(extents_core &&) noexcept = default; + constexpr extents_core& operator=(extents_core const&) noexcept = default; + constexpr extents_core& operator=(extents_core &&) noexcept = default; + ~extents_core() = default; + + [[nodiscard]] inline constexpr const_reference at (size_type k) const{ return m_data.at(k); } + [[nodiscard]] inline constexpr const_reference operator[](size_type k) const{ return m_data[k]; } + [[nodiscard]] inline constexpr base_type const& base() const noexcept{ return m_data; } + [[nodiscard]] inline constexpr const_pointer data () const noexcept{ return m_data.data(); } + +private: + static constexpr base_type const m_data{e1,e2,e...}; + +}; + +template struct is_extents < extents_core > : std::true_type {}; +template struct is_static < extents_core > : std::true_type {}; +template struct is_static_rank < extents_core > : std::true_type {}; + +} // namespace boost::numeric::ublas + + + + +#endif diff --git a/include/boost/numeric/ublas/tensor/extents/extents_static_functions.hpp b/include/boost/numeric/ublas/tensor/extents/extents_static_functions.hpp new file mode 100644 index 000000000..08b930d6f --- /dev/null +++ b/include/boost/numeric/ublas/tensor/extents/extents_static_functions.hpp @@ -0,0 +1,637 @@ +// +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + + +#ifndef BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_STATIC_FUNCTIONS_HPP +#define BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_STATIC_FUNCTIONS_HPP + + +#include +#include +#include + +#include "extents_base.hpp" +#include "../layout.hpp" + + + +namespace boost::numeric::ublas +{ + + + +//////////////// SIZE /////////////// + +namespace detail { +template +struct size_impl_t; +template +struct size_impl_t> +{ static constexpr auto value = sizeof ...(es); }; +template +struct size_impl_t> +{ static constexpr auto value = sizeof ...(es); }; +} // namespace detail + +/** @brief Returns the size of a pure static extents type + * + * @code constexpr auto n = size_v>; + * @note corresponds to std::tuple_size_v + * +*/ +template +constexpr inline auto size_v = detail::size_impl_t>::value; + +//////////////// EMPTY /////////////// + +namespace detail { +template +struct empty_impl_t; + +template +struct empty_impl_t> +{ static constexpr bool value = size_v> == 0ul; }; +} // namespace detail + +/** @brief Returns if a pure static extents type is empty + * + * @code constexpr bool empty = empty_v>; // -> false + * +*/ +template +constexpr inline bool empty_v = detail::empty_impl_t>::value; + + +//////////////// GET ///////////////////// + +namespace detail { +template +struct get_impl_t; + +template +struct get_impl_t> +{ + static constexpr auto value = 0; +}; + +template +struct get_impl_t> +{ + static_assert ( j < n && k < n ); + static constexpr auto value = e1; +}; + +template +struct get_impl_t> +{ + static_assert ( k < n && j < n ); + static constexpr auto value = (j==k) ? e1 : get_impl_t>::value; +}; +} // namespace detail + +/** @brief Returns the j-th element of a pure static extents type with 0 <= j < size_v + * + * @code constexpr auto e_j = get_v,2>; + * +*/ +template +constexpr inline auto get_v = detail::get_impl_t,std::decay_t>::value; + + +//////////////// CAT ///////////////////// +namespace detail { +template +struct cat_impl_t +{ + template + struct inner; + template + struct inner < std::index_sequence, std::index_sequence > + { + using type = extents < get_v..., get_v... >; + }; + using type = typename inner < + std::make_index_sequence>, + std::make_index_sequence> >::type; +}; +} // namespace detail + +/** @brief Concatenates two static extents type + * + * @code using extents_t = cat,extents<7,6>>; // -> extents<4,3,2,7,6> + * + * @tparam EL left extents<...> + * @tparam ER right extents<...> +*/ +template +using cat_t = typename detail::cat_impl_t,std::decay_t>::type; + +//////////////// FOR_EACH //////////////// + +namespace detail { + +template class> +struct for_each_impl_t; + +template class UnaryOp, typename std::size_t ... es> +struct for_each_impl_t, UnaryOp > +{ using type = extents< ( UnaryOp::value )... >; }; + +template class UnaryOp, typename std::size_t ... is> +struct for_each_impl_t, UnaryOp > +{ using type = std::index_sequence< ( UnaryOp::value )... 
>; }; + +} // namespace detail + +/** @brief Applies a unary operation for each element of a given static extents type + * + * @code template struct add5 { static constexpr auto value = e+5; }; + * @code using extents_t = for_each,add5>; // -> extents<9,8,7> + * + * @tparam E extents<...> +*/ +template typename UnaryOp> +using for_each_t = typename detail::for_each_impl_t, UnaryOp>::type; + +//////////////// TEST //////////////// + +namespace detail { + +template class> +struct for_each_test_impl_t; + +template class UnaryPred, typename std::size_t ... es> +struct for_each_test_impl_t, UnaryPred > +{ using type = std::integer_sequence::value )... >; }; + +template class UnaryPred, typename std::size_t ... is> +struct for_each_test_impl_t, UnaryPred > +{ using type = std::integer_sequence::value )... >; }; + +} // namespace detail + +/** @brief Returns true if for each element of a given static extents type the unary predicate holds + * + * @code template struct equal5 { static constexpr bool value = e==5; }; + * @code using sequence_t = for_each,equal5>; // -> std::integer_sequence + * + * @tparam E extents<...> +*/ +template typename UnaryPred> +using for_each_test_t = typename detail::for_each_test_impl_t, UnaryPred>::type; + + +//////////////// SELECT INDEX SEQUENCE ///////////////// + +namespace detail { +template +struct select_impl_t +{ + static_assert( size_v >= I::size() ); + template struct inner; + template + struct inner > { using type = extents ... >; }; + using type = typename inner::type; +}; +} // namespace detail + +/** @brief Returns a static extents type selected from a static extents type using std::index_sequence + * + * @code using extents_t = select,std::index_sequence<0,2>>; // -> extents<4,2> + * + * @tparam E extents<...> + * @tparam S std::index_sequence<...> +*/ +template +using select_t = typename detail::select_impl_t, S>::type; + + +//////////////// BINARY PLUS OP ///////////////// + +template +struct plus_t { static constexpr auto value = i+j; }; + +template +constexpr inline auto plus_v = plus_t::value; + +template +struct multiply_t { static constexpr auto value = i*j;}; + + + +//////////////// SET ///////////////////// + +namespace detail { +template +struct set_impl_t; + +template +struct set_impl_t> +{ + static constexpr inline auto n = size_v>; + template using plus_j1 = plus_t; + + using head_indices = std::make_index_sequence; + using tail_indices = for_each_t,plus_j1>; + + using head = select_t,head_indices>; + using tail = select_t,tail_indices>; + using type = cat_t>,tail>; +}; +} // namespace detail + +/** @brief Sets the j-th element of a pure static extents type with 0 <= j < size_v + * + * @code using extents_t = set_t<2,5,extents<4,3,2>>; // extents<4,3,5> + * + * @tparam j j-th position in extents with 0 <= j < size_v + * @tparam e value to replace the j-th element + * @tparam E extents +*/ +template +using set_t = typename detail::set_impl_t>::type; + + + +//////////////// REVERSE ////////////////// + +namespace detail { +template +struct reverse_impl_t; + +template +struct reverse_impl_t> +{ + using type = extents < ( get_v-js-1>) ... 
>; +}; +} // namespace detail + +/** @brief Reverses static extents of a static extents type + * + * @code using extents_t = reverse_t>; // -> extents<2,3,4> + * + * @tparam E extents<...> +*/ +template +using reverse_t = typename detail::reverse_impl_t, std::make_index_sequence>>::type; + + +//////////////// REMOVE ////////////////// + +namespace detail{ +template +struct remove_element_impl_t +{ + static constexpr auto n = E::size(); + using head = select_t >; + + template + struct tail_indices; + template + struct tail_indices> + { + using type = extents< (is+k+1) ... >; + }; + using tail = select_t>::type>; + using type = cat_t< head, tail>; +}; +} // namespace detail + +/** @brief Removes a static extent of a static extents type + * + * @code using extents_t = remove<1,extents<4,3,2>>; // -> extents<4,2> + * @note it is a special case of the select function + * + * @tparam k zero-based index + * @tparam E extents<...> +*/ +template +using remove_element_t = typename detail::remove_element_impl_t>::type; + + + +//////////////// ACCUMULATE ///////////////////// + +namespace detail { + +template class> +struct accumulate_impl_t; + +template class op, std::size_t i> +struct accumulate_impl_t, i, op> +{ static constexpr auto value = i; }; + +template class op, std::size_t i, std::size_t e> +struct accumulate_impl_t, i, op> +{ static constexpr auto value = op::value; }; + +template class op, std::size_t i, std::size_t e, std::size_t ... es> +struct accumulate_impl_t, i, op> +{ + using next = accumulate_impl_t,i,op>; + static constexpr auto value = op::value; + +}; +} // namespace detail + +template class BinaryOp> +constexpr inline auto accumulate_v = detail::accumulate_impl_t>,I,BinaryOp>::value; + + +//////////////// Product ///////////////////// + +namespace detail { + +template +struct product_impl_t +{ + static constexpr auto value = empty_v ? 0UL : accumulate_v; +}; + +} // namespace detail +template +constexpr inline auto product_v = detail::product_impl_t>::value; + + +//////////////// ALL_OF ///////////////////// + + +namespace detail { + +template +struct all_of_impl_t; +template<> +struct all_of_impl_t> +{ static constexpr bool value = true; }; +template +struct all_of_impl_t> +{ static constexpr bool value = ( e && ... && es ); }; + +} // namespace detail + +/** @brief Returns true if all elements of Extents satisfy UnaryOp + * + * @code constexpr auto e_j = all_of_v>; +*/ +template class UnaryPred> +constexpr inline bool all_of_v = detail::all_of_impl_t,UnaryPred>>::value; + + +//////////////// ALL_OF ///////////////////// + +namespace detail { +template +struct any_of_impl_t; +template<> +struct any_of_impl_t> +{ static constexpr bool value = true;}; + +template +struct any_of_impl_t> +{ static constexpr bool value = ( e || ... 
|| es ); }; + +} // namespace detail + +template class UnaryOp> +constexpr inline bool any_of_v = detail::any_of_impl_t,UnaryOp>>::value; + + +//////////////// IS_VALID ///////////////////// + +namespace detail { + +template +struct is_valid_impl_t { static constexpr bool value = false; }; +template<> +struct is_valid_impl_t> { static constexpr bool value = true ; }; + +template +struct is_valid_impl_t> +{ + template + struct greater_than_zero { static constexpr auto value = (n>0ul); }; + + static constexpr bool value = all_of_v,greater_than_zero >; +}; +} // namespace detail + +/** @brief Returns true if extents equals ([m,n,...,l]) with m>0,n>0,...,l>0 */ +template +constexpr inline bool is_valid_v = detail::is_valid_impl_t>::value; + + + +//////////////// IS_SCALAR ///////////////////// + +namespace detail { +template +struct is_scalar_impl_t +{ + template + struct equal_to_one { static constexpr auto value = (n == 1ul); }; + + static constexpr bool value = is_valid_v && + !empty_v && + all_of_v; +}; +} // namespace detail + +/** @brief Returns true if extents equals (m,[n,...,l]) with m=1,n=1,...,l=1 */ +template +constexpr inline bool is_scalar_v = detail::is_scalar_impl_t>::value; + + +//////////////// IS_VECTOR ///////////////////// + +namespace detail { + +template +struct is_vector_impl_t { static constexpr bool value = false; }; +template<> +struct is_vector_impl_t> { static constexpr bool value = false; }; + +template +struct is_vector_impl_t> { static constexpr bool value = (e>=1); }; + +template +struct is_vector_impl_t> +{ + template struct equal_to_one { static constexpr auto value = (n == 1ul); }; + template struct greater_than_zero { static constexpr auto value = (n > 0ul); }; + + static constexpr bool value = + is_valid_v > && + any_of_v ,greater_than_zero> && + any_of_v ,equal_to_one > && + all_of_v ,equal_to_one >; +}; + + +} // namespace detail + +/** @brief Returns true if extents equals (m,[n,1,...,1]) with m>=1||n>=1 && m==1||n==1*/ +template +constexpr inline bool is_vector_v = detail::is_vector_impl_t>::value; + + + +//////////////// IS_MATRIX ///////////////////// + +namespace detail { + +template +struct is_matrix; + +template<> +struct is_matrix> { static constexpr bool value = false; }; + +template +struct is_matrix> { static constexpr bool value = true; }; + +template +struct is_matrix> +{ + template struct equal_to_one { static constexpr auto value = (n == 1ul); }; + template struct greater_than_zero { static constexpr auto value = (n > 0ul); }; + + static constexpr bool value = + is_valid_v > && + all_of_v ,greater_than_zero > && + all_of_v ,equal_to_one >; +}; + + +} // namespace detail + +/** @brief Returns true if (m,n,[1,...,1]) with m>=1 or n>=1 */ +template +constexpr inline bool is_matrix_v = detail::is_matrix>::value; + + +//////////////// IS_TENSOR ///////////////////// + +namespace detail { + +template +struct is_tensor; + +template<> +struct is_tensor> { static constexpr bool value = false; }; + +template +struct is_tensor> { static constexpr bool value = false; }; + +template +struct is_tensor> +{ + template + struct greater_than_one { static constexpr auto value = (n > 1ul); }; + + static constexpr bool value = + is_valid_v > && + size_v > > 2ul && + any_of_v ,greater_than_one >; +}; + + +} // namespace detail + +/** @brief Returns true if extents is equal to (m,n,[1,...,1],k,[1,...,1]) with k > 1 */ +template +constexpr inline bool is_tensor_v = detail::is_tensor>::value; + + +//////////////// ARRAY_CONVERSION ///////////////////// + 
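The shape classification and reduction helpers above are all usable in constant expressions. A compact illustrative check follows; the extents<...> alias and the umbrella header <boost/numeric/ublas/tensor/extents.hpp> are assumed to expose these helpers as they are used elsewhere in this patch.

#include <boost/numeric/ublas/tensor/extents.hpp>
#include <type_traits>

namespace ublas = boost::numeric::ublas;

// element access, rank and product of a fully static shape
static_assert( ublas::size_v   < ublas::extents<4,3,2> >    == 3  );
static_assert( ublas::get_v    < ublas::extents<4,3,2>, 1 > == 3  );
static_assert( ublas::product_v< ublas::extents<4,3,2> >    == 24 );

// concatenation of two static shapes
static_assert( std::is_same_v< ublas::cat_t< ublas::extents<4,3>, ublas::extents<7,6> >,
                               ublas::extents<4,3,7,6> > );

// shape predicates evaluated at compile time
static_assert(  ublas::is_valid_v < ublas::extents<4,3,2> > );
static_assert(  ublas::is_matrix_v< ublas::extents<4,3>   > );
static_assert(  ublas::is_vector_v< ublas::extents<4,1>   > );
static_assert(  ublas::is_tensor_v< ublas::extents<4,3,2> > );
static_assert( !ublas::is_scalar_v< ublas::extents<4,3,2> > );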
+namespace detail { +template +struct to_array_impl_t; + +template +struct to_array_impl_t> +{ static constexpr auto value = std::array{is... }; }; + +template +struct to_array_impl_t> +{ static constexpr auto value = std::array{is... }; }; + +} // namespace detail + +template +constexpr inline auto to_array_v = detail::to_array_impl_t>::value; + + + + +namespace detail { + +template +struct to_strides_impl_t; + +template +struct to_strides_impl_t > +{ + static_assert (is_valid_v); + + static constexpr bool is_first_order = std::is_same_v; + using adjusted_extents = std::conditional_t>; + + template + static constexpr std::size_t selected_product = product_v>>; + + using pre_type = extents <1,( selected_product ) ... >; + using type = std::conditional_t>; +}; + +} // namespace detail + +template +using to_strides_impl_t = typename detail::to_strides_impl_t-1>>::type; + +template +constexpr inline auto to_strides_v = to_array_v,L>>; + +} //namespace boost::numeric::ublas + + +template < + std::size_t l1, + std::size_t l2, + std::size_t r1, + std::size_t r2, + std::size_t ... l, + std::size_t ... r> +[[nodiscard]] inline constexpr bool operator==( + boost::numeric::ublas::extents /*unused*/, + boost::numeric::ublas::extents /*unused*/) +{ + return std::is_same_v< + boost::numeric::ublas::extents, + boost::numeric::ublas::extents>; +} + +template < + std::size_t l1, + std::size_t l2, + std::size_t r1, + std::size_t r2, + std::size_t ... l, + std::size_t ... r> +[[nodiscard]] inline constexpr bool operator!=( + boost::numeric::ublas::extents el, + boost::numeric::ublas::extents er) +{ + return !(el == er); +} + +#endif // BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_STATIC_FUNCTIONS_HPP + + diff --git a/include/boost/numeric/ublas/tensor/extents/extents_static_size.hpp b/include/boost/numeric/ublas/tensor/extents/extents_static_size.hpp new file mode 100644 index 000000000..f274dfafc --- /dev/null +++ b/include/boost/numeric/ublas/tensor/extents/extents_static_size.hpp @@ -0,0 +1,148 @@ +// +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
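The to_strides_v helper above composes to_array_v with the compile-time stride computation, so static strides can be checked at compile time as well. A short sketch; the <extents, layout> argument order is read from the declarations above, and the layout tag names and header paths are assumptions based on their use elsewhere in this patch.

#include <boost/numeric/ublas/tensor/extents.hpp>
#include <boost/numeric/ublas/tensor/layout.hpp>

namespace ublas = boost::numeric::ublas;

// for a 4x3x2 shape: first-order (column-major) strides are (1,4,12),
// last-order (row-major) strides are (6,2,1)
constexpr auto wf = ublas::to_strides_v< ublas::extents<4,3,2>, ublas::layout::first_order >;
constexpr auto wl = ublas::to_strides_v< ublas::extents<4,3,2>, ublas::layout::last_order  >;
static_assert( wf[0] == 1 && wf[1] == 4 && wf[2] == 12 );
static_assert( wl[0] == 6 && wl[1] == 2 && wl[2] == 1  );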
(See
+// accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+//
+// The authors gratefully acknowledge the support of
+// Google and Fraunhofer IOSB, Ettlingen, Germany
+//
+
+
+#ifndef BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_STATIC_SIZE_HPP
+#define BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_STATIC_SIZE_HPP
+
+#include
+#include
+#include
+#include
+#include
+
+
+#include "extents_functions.hpp"
+#include "extents_base.hpp"
+
+#include "../layout.hpp"
+#include "../concepts.hpp"
+
+
+namespace boost::numeric::ublas
+{
+
+/** @brief Class template for storing a static number of extents
+ *
+ * @code auto e = extents<3>{3,2,4}; @endcode
+ *
+ * @tparam N number of extents
+ *
+ */
+template
+class extents_core : public extents_base>
+{
+public:
+  using base_type              = std::array;
+  using value_type             = typename base_type::value_type;
+  using size_type              = typename base_type::size_type;
+  using reference              = typename base_type::reference;
+  using const_reference        = typename base_type::const_reference;
+  using const_pointer          = typename base_type::const_pointer;
+  using const_iterator         = typename base_type::const_iterator;
+  using const_reverse_iterator = typename base_type::const_reverse_iterator;
+
+  constexpr extents_core() = default;
+
+  constexpr explicit extents_core(base_type data)
+    : _base(std::move(data))
+  {
+    if ( !ublas::is_valid(*this) ){
+      throw std::invalid_argument("in boost::numeric::ublas::extents : "
+        "could not instantiate extents as provided extents are not valid.");
+    }
+  }
+
+
+  constexpr extents_core(std::initializer_list const& li)
+    : _base()
+  {
+    if( li.size() != ublas::size(*this) ){
+      throw std::length_error("in boost::numeric::ublas::extents : "
+        "could not instantiate extents as the number of indices does not equal N.");
+    }
+
+    std::copy(li.begin(), li.end(), _base.begin());
+
+    if ( !ublas::is_valid(*this) ){
+      throw std::invalid_argument("in boost::numeric::ublas::extents : "
+        "could not instantiate extents as provided extents are not valid.");
+    }
+  }
+
+  constexpr extents_core(const_iterator begin, const_iterator end)
+  {
+    if( std::distance(begin,end) < 0 || static_cast(std::distance(begin,end)) > this->base().size()){
+      throw std::out_of_range("in boost::numeric::ublas::extents : "
+        "iterator range is greater than the rank N");
+    }
+
+    std::copy(begin, end, _base.begin());
+
+    if ( !ublas::is_valid(*this) ) {
+      throw std::invalid_argument("in boost::numeric::ublas::extents::ctor: "
+        "could not instantiate extents as provided extents are not valid.");
+    }
+  }
+  constexpr extents_core(extents_core const& other)
+    : _base(other._base)
+  {
+    assert(ublas::is_valid(*this));
+  }
+
+  constexpr extents_core(extents_core && other) noexcept
+    : _base( std::move(other._base) )
+  {
+  }
+
+  // NOLINTNEXTLINE(cppcoreguidelines-special-member-functions,hicpp-special-member-functions)
+  constexpr extents_core& operator=(extents_core other)
+    noexcept(std::is_nothrow_swappable_v)
+  {
+    swap(*this,other);
+    return *this;
+  }
+
+  ~extents_core() = default;
+
+
+  friend void swap(extents_core& lhs, extents_core& rhs)
+    noexcept(std::is_nothrow_swappable_v)
+  {
+    std::swap(lhs._base, rhs._base);
+  }
+
+  [[nodiscard]] inline constexpr const_reference at (size_type k) const { return this->_base.at(k); }
+  [[nodiscard]] inline constexpr const_reference operator[](size_type k) const { return this->_base[k]; }
+  [[nodiscard]] inline constexpr auto const& base () const noexcept { return this->_base; }
+  [[nodiscard]] inline constexpr const_pointer data () const noexcept {
return this->_base.data(); } + + +private: + base_type _base{}; +}; + +} // namespace boost::numeric::ublas + + + + +namespace boost::numeric::ublas{ +template struct is_extents < extents_core > : std::true_type {}; +template struct is_dynamic < extents_core > : std::true_type {}; +template struct is_static_rank < extents_core > : std::true_type {}; +} // namespace boost::numeric::ublas + + + + +#endif diff --git a/include/boost/numeric/ublas/tensor/extents_functions.hpp b/include/boost/numeric/ublas/tensor/extents_functions.hpp deleted file mode 100644 index 46f15f93a..000000000 --- a/include/boost/numeric/ublas/tensor/extents_functions.hpp +++ /dev/null @@ -1,449 +0,0 @@ -// -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// - -#ifndef _BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_FUNCTIONS_HPP_ -#define _BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_FUNCTIONS_HPP_ - -#include -#include -#include -#include -#include -#include - -namespace boost::numeric::ublas::detail{ - - template - constexpr auto push_back(basic_static_extents) -> basic_static_extents; - - template - constexpr auto push_front(basic_static_extents) -> basic_static_extents; - - template - constexpr auto pop_front(basic_static_extents) -> basic_static_extents; - - template - constexpr auto any_extents_greater_than_one([[maybe_unused]] basic_static_extents const& e) noexcept{ - constexpr auto sz = sizeof...(Es); - return sz && ( ( Es > T(1) ) || ... ); - } - - template - constexpr auto squeeze_impl_remove_one( - [[maybe_unused]] basic_static_extents e, - basic_static_extents num = basic_static_extents{} - ){ - // executed when basic_static_extents is size of 1 - // @code basic_static_extents @endcode - if constexpr( sizeof...(E) == 0ul ){ - // if element E0 is 1 we return number list but we do not append - // it to the list - if constexpr( E0 == T(1) ){ - return num; - }else{ - // if element E0 is 1 we return number list but we append - // it to the list - return decltype(push_back(num)){}; - } - }else{ - if constexpr( E0 == T(1) ){ - // if element E0 is 1 we return number list but we do not append - // it to the list - return squeeze_impl_remove_one(basic_static_extents{}, num); - }else{ - // if element E0 is 1 we return number list but we append - // it to the list - auto n_num_list = decltype(push_back(num)){}; - return squeeze_impl_remove_one(basic_static_extents{}, n_num_list); - } - } - } - - template - constexpr auto squeeze_impl( basic_static_extents const& e ){ - - using extents_type = basic_static_extents; - - if constexpr( extents_type::_size <= typename extents_type::size_type(2) ){ - return e; - } - - using value_type = typename extents_type::value_type; - using size_type = typename extents_type::size_type; - - auto one_free_static_extents = squeeze_impl_remove_one(e); - - // check after removing 1s from the list are they same - // if same that means 1s does not exist and no need to - // squeeze - if constexpr( decltype(one_free_static_extents)::_size != extents_type::_size ){ - - // after squeezing, all the extents are 1s we need to - // return extents of (1, 1) - if constexpr( decltype(one_free_static_extents)::_size == size_type(0) ){ - - return basic_static_extents{}; - - }else if 
constexpr( decltype(one_free_static_extents)::_size == (1) ){ - // to comply with GNU Octave this check is made - // if position 2 contains 1 we push at back - // else we push at front - if constexpr( extents_type::at(1) == value_type(1) ){ - return decltype( push_back(one_free_static_extents) ){}; - }else{ - return decltype( push_front(one_free_static_extents) ){}; - } - - }else{ - return one_free_static_extents; - } - - }else{ - return e; - } - - } - - template - inline - constexpr auto squeeze_impl( basic_extents const& e ){ - using extents_type = basic_extents; - using base_type = typename extents_type::base_type; - using value_type = typename extents_type::value_type; - using size_type = typename extents_type::size_type; - - if( e.size() <= size_type(2) ){ - return e; - } - - auto not_one = [](auto const& el){ - return el != value_type(1); - }; - - // count non one values - size_type size = std::count_if(e.begin(), e.end(), not_one); - - // reserve space - base_type n_extents( std::max(size, size_type(2)), 1 ); - - // copying non 1s to the new extents - std::copy_if(e.begin(), e.end(), n_extents.begin(), not_one); - - // checking if extents size goes blow 2 - // if size of extents goes to 1 - // complying with GNU Octave - // if position 2 contains 1 we - // swap the pos - if( size < size_type(2) && e[1] != value_type(1) ){ - std::swap(n_extents[0], n_extents[1]); - } - - return extents_type(n_extents); - } - - template - inline - auto squeeze_impl( basic_fixed_rank_extents const& e ){ - if constexpr( N <= 2 ){ - return e; - }else{ - return squeeze_impl(basic_extents(e)); - } - } - - - -} // namespace boost::numeric::ublas::detail - -namespace boost::numeric::ublas { - -/** @brief Returns true if size > 1 and all elements > 0 or size == 1 && e[0] == 1 */ -template -[[nodiscard]] inline -constexpr bool is_valid(ExtentsType const &e) { - - static_assert(is_extents_v, "boost::numeric::ublas::is_valid() : invalid type, type should be an extents"); - - auto greater_than_zero = [](auto const& a){ return a > 0u; }; - - if( e.size() == 1u ) return e[0] == 1u; - return !e.empty() && std::all_of(e.begin(), e.end(), greater_than_zero ); -} - -/** @brief Returns true if size > 1 and all elements > 0 or size == 1 && e[0] == 1 */ -template -[[nodiscard]] inline -constexpr bool is_valid( [[maybe_unused]] basic_static_extents const &e) noexcept { - constexpr auto sz = sizeof...(Es); - /// if number of extents is 1 then extents at 0th pos should be 1 - /// else if number of extents is greater than 1 then all the extents - /// should be greater than 0 - /// else return false - return ( ( sz == 1ul ) && ( ( T(1) == Es ) && ... ) ) || - ( ( sz > 1ul ) && ( ( T(0) < Es ) && ... 
) ); -} - -/** - * @code static_extents<4,1,2,3,4> s; - * std::cout< -[[nodiscard]] inline -std::string to_string(T const &e) { - - using value_type = typename T::value_type; - - static_assert(is_extents_v ||is_strides_v, - "boost::numeric::ublas::to_string() : invalid type, type should be an extents or a strides"); - - if ( e.empty() ) return "[]"; - - std::stringstream ss; - - ss << "[ "; - - std::copy( e.begin(), e.end() - 1, std::ostream_iterator(ss,", ") ); - - ss << e.back() << " ]"; - - return ss.str(); -} - -/** @brief Returns true if this has a scalar shape - * - * @returns true if (1,1,[1,...,1]) - */ -template -[[nodiscard]] inline -constexpr bool is_scalar(ExtentsType const &e) { - - static_assert(is_extents_v, "boost::numeric::ublas::is_scalar() : invalid type, type should be an extents"); - - auto equal_one = [](auto const &a) { return a == 1u; }; - - return !e.empty() && std::all_of(e.begin(), e.end(), equal_one); -} - -/** @brief Returns true if this has a scalar shape - * - * @returns true if (1,1,[1,...,1]) - */ -template -[[nodiscard]] inline -constexpr bool is_scalar( [[maybe_unused]] basic_static_extents const &e) noexcept { - constexpr auto sz = sizeof...(Es); - /// if number of extents is greater than 1 then all the extents should be 1 - /// else return false; - return sz && ( ( T(1) == Es ) && ... ); -} - -/** @brief Returns true if this has a vector shape - * - * @returns true if (1,n,[1,...,1]) or (n,1,[1,...,1]) with n > 1 - */ -template -[[nodiscard]] inline -constexpr bool is_vector(ExtentsType const &e) { - - static_assert(is_extents_v, "boost::numeric::ublas::is_vector() : invalid type, type should be an extents"); - - auto greater_one = [](auto const &a) { return a > 1u; }; - auto equal_one = [](auto const &a) { return a == 1u; }; - - if (e.empty()) return false; - if (e.size() == 1u) return e[0] > 1u; - return std::any_of(e.begin(), e.begin() + 2, greater_one) && - std::any_of(e.begin(), e.begin() + 2, equal_one) && - std::all_of(e.begin() + 2, e.end(), equal_one); - -} - -/** @brief Returns true if this has a vector shape - * - * @returns true if (1,n,[1,...,1]) or (n,1,[1,...,1]) with n > 1 - */ -template -[[nodiscard]] inline -constexpr bool is_vector( [[maybe_unused]] basic_static_extents const &e) noexcept { - using extents_type = basic_static_extents; - - if constexpr (sizeof... (Es) == 1ul) return extents_type::at(0) > T(1); - else if constexpr (sizeof... 
(Es) >= 2ul){ - /// first two elements of the extents cannot be greater than 1 at the - /// same time which xor operation keeps in check - /// example: 0 xor 1 => 1, 1 xor 1 => 0, 1 xor 0 => 1, and 0 xor 0 => 0 - constexpr bool first_two_extents = ( extents_type::at(0) > T(1) ) ^ ( extents_type::at(1) > T(1) ); - - /// poping first two elements from the extents and checking is_scalar - /// basic_static_extents after poping two times becomes - /// basic_static_extents - using extents_after_removing_the_first_element = decltype( detail::pop_front( e ) ); - using extents_after_removing_the_second_element = decltype( detail::pop_front( extents_after_removing_the_first_element{} ) ); - return first_two_extents && - ( extents_after_removing_the_second_element::_size == 0ul || - is_scalar(extents_after_removing_the_second_element{}) - ); - } else return false; -} - -/** @brief Returns true if this has a matrix shape - * - * @returns true if (m,n,[1,...,1]) with m > 1 and n > 1 - */ -template -[[nodiscard]] inline -constexpr bool is_matrix(ExtentsType const &e) { - - static_assert(is_extents_v, "boost::numeric::ublas::is_matrix() : invalid type, type should be an extents"); - - auto greater_one = [](auto const &a) { return a > 1u; }; - auto equal_one = [](auto const &a) { return a == 1u; }; - - return ( e.size() >= 2u ) && std::all_of(e.begin(), e.begin() + 2, greater_one) && - std::all_of(e.begin() + 2, e.end(), equal_one); -} - -/** @brief Returns true if this has a matrix shape - * - * @returns true if (m,n,[1,...,1]) with m > 1 and n > 1 - */ -template -[[nodiscard]] inline -constexpr bool is_matrix( [[maybe_unused]] basic_static_extents const &e) noexcept { - using extents_type = basic_static_extents; - - if constexpr (sizeof... (Es) >= 2ul){ - /// first two elements of the extents should be greater than 1 at the - /// same time and remaing range should be scalar or empty - constexpr bool first_two_extents = ( extents_type::at(0) > T(1) ) && ( extents_type::at(1) > T(1) ); - - /// poping first two elements from the extents and checking is_scalar - /// basic_static_extents after poping two times becomes - /// basic_static_extents - using extents_after_removing_the_first_element = decltype( detail::pop_front( e ) ); - using extents_after_removing_the_second_element = decltype( detail::pop_front( extents_after_removing_the_first_element{} ) ); - return first_two_extents && - ( extents_after_removing_the_second_element::_size == 0ul || - is_scalar(extents_after_removing_the_second_element{}) - ); - } else return false; -} - -/** @brief Returns true if this is has a tensor shape - * - * @returns true if !empty() && !is_scalar() && !is_vector() && !is_matrix() - */ -template -[[nodiscard]] inline -constexpr bool is_tensor(ExtentsType const &e) { - - static_assert(is_extents_v, "boost::numeric::ublas::is_tensor() : invalid type, type should be an extents"); - - auto greater_one = [](auto const &a) { return a > 1u;}; - - return ( e.size() >= 3u ) && std::any_of(e.begin() + 2, e.end(), greater_one); -} - -/** @brief Returns true if this is has a tensor shape - * - * @returns true if !empty() && !is_scalar() && !is_vector() && !is_matrix() - */ -template -[[nodiscard]] inline -constexpr bool is_tensor( [[maybe_unused]] basic_static_extents const &e) noexcept { - if constexpr( sizeof...(Es) >= 3ul ){ - /// poping first two elements from the extents and checking the remaining - /// extent, if any extent is greater than 1 - /// basic_static_extents after poping two times becomes - /// basic_static_extents 
- using extents_after_removing_the_first_element = decltype( detail::pop_front( e ) ); - using extents_after_removing_the_second_element = decltype( detail::pop_front( extents_after_removing_the_first_element{} ) ); - return detail::any_extents_greater_than_one(extents_after_removing_the_second_element{}); - - } else return false; -} - -/** @brief Eliminates singleton dimensions when size > 2 - * - * squeeze { 1,1} -> { 1,1} - * squeeze { 2,1} -> { 2,1} - * squeeze { 1,2} -> { 1,2} - * - * squeeze {1,2,3} -> { 2,3} - * squeeze {2,1,3} -> { 2,3} - * squeeze {1,3,1} -> { 1,3} - * - * @returns basic_extents with squeezed extents - */ -template -[[nodiscard]] inline -auto squeeze(ExtentsType const &e) { - - static_assert(is_extents_v, "boost::numeric::ublas::squeeze() : invalid type, type should be an extents"); - - return detail::squeeze_impl(e); -} - -/** @brief Returns the product of extents */ -template -[[nodiscard]] inline -constexpr auto product(ExtentsType const &e) { - - static_assert(is_extents_v, "boost::numeric::ublas::product() : invalid type, type should be an extents"); - - if ( e.empty() ) return 0u; - return std::accumulate(e.begin(), e.end(), 1u, std::multiplies<>()) ; -} - -/** @brief Returns the product of static extents at compile-time */ -template -[[nodiscard]] inline -constexpr auto product( [[maybe_unused]] basic_static_extents const &e) noexcept { - if constexpr( sizeof...(Es) == 0 ) return T(0); - else return T( (Es * ...) ); -} - - -template && is_extents_v - , int> = 0 -> -[[nodiscard]] inline -constexpr bool operator==(LExtents const& lhs, RExtents const& rhs) noexcept{ - - static_assert( std::is_same_v, - "boost::numeric::ublas::operator==(LExtents, RExtents) : LHS value type should be same as RHS value type"); - - return ( lhs.size() == rhs.size() ) && std::equal(lhs.begin(), lhs.end(), rhs.begin()); -} - -template && is_extents_v - , int> = 0 -> -[[nodiscard]] inline -constexpr bool operator!=(LExtents const& lhs, RExtents const& rhs) noexcept{ - - static_assert( std::is_same_v, - "boost::numeric::ublas::operator!=(LExtents, RExtents) : LHS value type should be same as RHS value type"); - - return !( lhs == rhs ); -} - -} // namespace boost::numeric::ublas - -#endif diff --git a/include/boost/numeric/ublas/tensor/fixed_rank_extents.hpp b/include/boost/numeric/ublas/tensor/fixed_rank_extents.hpp deleted file mode 100644 index a3aa3603d..000000000 --- a/include/boost/numeric/ublas/tensor/fixed_rank_extents.hpp +++ /dev/null @@ -1,248 +0,0 @@ -// -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// - - -#ifndef _BOOST_NUMERIC_UBLAS_TENSOR_FIXED_RANK_EXTENTS_HPP_ -#define _BOOST_NUMERIC_UBLAS_TENSOR_FIXED_RANK_EXTENTS_HPP_ - -#include -#include -#include -#include -#include -#include - -namespace boost::numeric::ublas { - -/** @brief Template class for storing tensor extents for compile time. 
- * - * @code basic_static_extents<1,2,3,4> t @endcode - * @tparam E parameter pack of extents - * - */ -template -class basic_fixed_rank_extents -{ - -public: - - static constexpr std::size_t const _size = N; - - using base_type = std::array; - using value_type = typename base_type::value_type; - using size_type = typename base_type::size_type; - using reference = typename base_type::reference; - using const_reference = typename base_type::const_reference; - using const_pointer = typename base_type::const_pointer; - using const_iterator = typename base_type::const_iterator; - using const_reverse_iterator = typename base_type::const_reverse_iterator; - - static_assert( std::numeric_limits::is_integer, "Static error in basic_fixed_rank_extents: type must be of type integer."); - static_assert(!std::numeric_limits::is_signed, "Static error in basic_fixed_rank_extents: type must be of type unsigned integer."); - - //@returns the rank of basic_static_extents - [[nodiscard]] - static constexpr size_type size() noexcept { return _size; } - - [[nodiscard]] inline - constexpr const_reference at(size_type k) const{ - return _base.at(k); - } - - [[nodiscard]] inline - constexpr reference at(size_type k){ - return _base.at(k); - } - - [[nodiscard]] inline - constexpr const_reference operator[](size_type k) const noexcept{ - return _base[k]; - } - - [[nodiscard]] inline - constexpr reference operator[](size_type k) noexcept{ - return _base[k]; - } - - constexpr basic_fixed_rank_extents() = default; - - constexpr basic_fixed_rank_extents(basic_fixed_rank_extents const& other) - : _base(other._base) - {} - - constexpr basic_fixed_rank_extents(basic_fixed_rank_extents && other) noexcept - : _base( std::move(other._base) ) - {} - - constexpr basic_fixed_rank_extents& operator=(basic_fixed_rank_extents const& other) - noexcept(std::is_nothrow_swappable_v) - { - basic_fixed_rank_extents temp(other); - swap(*this,temp); - return *this; - } - - constexpr basic_fixed_rank_extents& operator=(basic_fixed_rank_extents && other) - noexcept(std::is_nothrow_swappable_v) - { - swap(*this,other); - return *this; - } - - ~basic_fixed_rank_extents() = default; - - constexpr basic_fixed_rank_extents(std::initializer_list li){ - if( li.size() > _size ){ - throw std::out_of_range("boost::numeric::ublas::basic_fixed_rank_extents(std::initializer_list): " - "number of elements in std::initializer_list is greater than the size" - ); - } - - std::copy(li.begin(), li.end(), _base.begin()); - - if ( !is_valid(*this) ){ - throw std::length_error("Error in basic_fixed_rank_extents::basic_fixed_rank_extents() : " - "shape tuple is not a valid permutation: has zero elements." - ); - } - } - - constexpr basic_fixed_rank_extents(const_iterator begin, const_iterator end){ - if( std::distance(begin,end) < 0 || static_cast(std::distance(begin,end)) > _size){ - throw std::out_of_range("boost::numeric::ublas::basic_fixed_rank_extents(): initializer list size is greater than the rank"); - } - - std::copy(begin, end, _base.begin()); - - if ( !is_valid(*this) ){ - throw std::length_error("Error in basic_fixed_rank_extents::basic_fixed_rank_extents(const_iterator,const_iterator) : " - "shape tuple is not a valid permutation: has zero elements." 
- ); - } - } - - inline - constexpr void fill( value_type value ){ - _base.fill(value); - } - - template - constexpr basic_fixed_rank_extents(OtherExtents const& e){ - static_assert( is_extents_v, "boost::numeric::ublas::basic_fixed_rank_extents(OtherExtents const&) : " - "OtherExtents should be a valid tensor extents" - ); - - if constexpr( is_static_rank_v< OtherExtents > ){ - static_assert( OtherExtents::_size == _size, - "basic_fixed_rank_extents::basic_fixed_rank_extents(OtherExtents const&) : " - "unequal rank found, rank should be equal" - ); - }else{ - if( e.size() != size() ){ - throw std::length_error("Error in basic_fixed_rank_extents::basic_fixed_rank_extents(OtherExtents const&) : " - "unequal rank found, rank should be equal" - ); - } - } - - std::copy_n(e.begin(),_size, _base.begin()); - } - - constexpr basic_fixed_rank_extents(base_type const& data) - : _base(data) - { - if ( !is_valid(*this) ){ - throw std::length_error("Error in basic_fixed_rank_extents::basic_fixed_rank_extents(base_type const&) : " - "shape tuple is not a valid permutation: has zero elements." - ); - } - } - - constexpr basic_fixed_rank_extents(base_type&& data) - : _base(std::move(data)) - { - if ( !is_valid(*this) ){ - throw std::length_error("Error in basic_fixed_rank_extents::basic_fixed_rank_extents(base_type &&) : " - "shape tuple is not a valid permutation: has zero elements." - ); - } - } - - /** @brief Returns the std::vector containing extents */ - [[nodiscard]] inline - constexpr base_type const& base() const noexcept{ - return _base; - } - - /** @brief Checks if extents is empty or not - * - * @returns true if rank is 0 else false - * - */ - [[nodiscard]] inline - constexpr bool empty() const noexcept { return _size == size_type{0}; } - - friend void swap(basic_fixed_rank_extents& lhs, basic_fixed_rank_extents& rhs) - noexcept(std::is_nothrow_swappable_v) - { - std::swap(lhs._base , rhs._base ); - } - - [[nodiscard]] inline - constexpr const_pointer data() const noexcept - { - return _base.data(); - } - - [[nodiscard]] inline - constexpr const_iterator - begin() const noexcept - { - return _base.begin(); - } - - [[nodiscard]] inline - constexpr const_iterator - end() const noexcept - { - return _base.end(); - } - - [[nodiscard]] inline - constexpr const_reference back () const - { - return _base.back(); - } - - [[nodiscard]] inline - constexpr const_reverse_iterator - rbegin() const noexcept - { - return _base.rbegin(); - } - - [[nodiscard]] inline - constexpr const_reverse_iterator - rend() const noexcept - { - return _base.rend(); - } - -private: - base_type _base{}; -}; - -} // namespace boost::numeric::ublass - - - -#endif diff --git a/include/boost/numeric/ublas/tensor/fixed_rank_strides.hpp b/include/boost/numeric/ublas/tensor/fixed_rank_strides.hpp index 952f6e29e..011a4c64c 100644 --- a/include/boost/numeric/ublas/tensor/fixed_rank_strides.hpp +++ b/include/boost/numeric/ublas/tensor/fixed_rank_strides.hpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at @@ -12,11 +12,17 @@ /// \file strides.hpp Definition for the basic_strides template class -#ifndef _BOOST_UBLAS_TENSOR_FIXED_RANK_STRIDES_HPP_ -#define _BOOST_UBLAS_TENSOR_FIXED_RANK_STRIDES_HPP_ -#include -#include +#ifndef BOOST_UBLAS_TENSOR_FIXED_RANK_STRIDES_HPP +#define BOOST_UBLAS_TENSOR_FIXED_RANK_STRIDES_HPP + +#if 0 + +#include "detail/strides_functions.hpp" +#include "extents/extents_static_size.hpp" +#include "layout.hpp" +#include "strides_base.hpp" + namespace boost::numeric::ublas { @@ -25,126 +31,63 @@ namespace boost::numeric::ublas { * Proxy template class of std::array. * */ -template -class basic_fixed_rank_strides +template +class strides,L> : public strides_base,L>> { public: - static constexpr std::size_t const _size = N; - - using layout_type = L; - using base_type = std::array; - using value_type = typename base_type::value_type; - using reference = typename base_type::reference; - using const_reference = typename base_type::const_reference; - using size_type = typename base_type::size_type; - using const_pointer = typename base_type::const_pointer; - using const_iterator = typename base_type::const_iterator; - using const_reverse_iterator = typename base_type::const_reverse_iterator; - - static_assert( std::numeric_limits::is_integer, - "Static error in boost::numeric::ublas::basic_fixed_rank_strides: type must be of type integer."); - static_assert(!std::numeric_limits::is_signed, - "Static error in boost::numeric::ublas::basic_fixed_rank_strides: type must be of type unsigned integer."); - static_assert(std::is_same::value || std::is_same::value, - "Static error in boost::numeric::ublas::basic_fixed_rank_strides: layout type must either first or last order"); - - /** @brief Default constructs basic_fixed_rank_strides + using extents_type = extents; + using layout_type = L; + using base_type = typename extents_type::base_type; + using value_type = typename base_type::value_type; + using reference = typename base_type::reference; + using const_reference = typename base_type::const_reference; + using size_type = typename base_type::size_type; + using const_pointer = typename base_type::const_pointer; + using const_iterator = typename base_type::const_iterator; + using const_reverse_iterator = typename base_type::const_reverse_iterator; + + + static_assert(std::is_same::value || + std::is_same::value); + /** @brief Default constructs strides with static size * - * @code auto ex = basic_fixed_rank_strides{}; + * @code auto s = strides>{}; */ - constexpr basic_fixed_rank_strides() noexcept = default; + constexpr strides() noexcept = default; - /** @brief Constructs basic_fixed_rank_strides from basic_extents for the first- and last-order storage formats + /** @brief Constructs strides from extents with static size for the first- and last-order storage formats * - * @code auto strides = basic_fixed_rank_strides( basic_extents{2,3,4} ); + * @code auto s = strides>({2,3,4},layout::first_order{}); * */ - template - constexpr basic_fixed_rank_strides(ExtentsType const& s) + constexpr explicit strides(extents_type const& e) + : _base(compute_strides(e)) { - static_assert( is_extents_v, "boost::numeric::ublas::basic_fixed_rank_strides(ExtentsType const&) : " - "ExtentsType is not a tensor extents" - ); - - if constexpr( is_static_rank_v< ExtentsType > ){ - static_assert( ExtentsType::_size == _size, - "boost::numeric::ublas::basic_fixed_rank_strides(ExtentsType const&) : " - "ExentsType size should be equal to the size of 
basic_fixed_rank_strides" - ); - }else{ - if ( s.size() != size() ){ - throw std::length_error( - "boost::numeric::ublas::basic_fixed_rank_strides(ExtentsType const&) : " - "ExentsType size should be equal to the size of basic_fixed_rank_strides" - ); - } - } - - _base.fill(value_type(1)); - - if( s.empty() ) - return; - - if( !is_valid(s) ) - throw std::runtime_error("Error in boost::numeric::ublas::basic_fixed_rank_strides(ExtentsType const&) : " - "shape is not valid." - ); - - if( is_vector(s) || is_scalar(s) ) - return; - - if( this->size() < 2 ) - throw std::runtime_error("Error in boost::numeric::ublas::basic_fixed_rank_strides(ExtentsType const&) : " - "size of strides must be greater or equal 2." - ); - - - if constexpr (std::is_same::value){ - std::transform(s.begin(), s.end() - 1, _base.begin(), _base.begin() + 1, std::multiplies{}); - }else { - std::transform(s.rbegin(), s.rend() - 1, _base.rbegin(), _base.rbegin() + 1, std::multiplies{}); - } - } - - constexpr basic_fixed_rank_strides(basic_fixed_rank_strides const& l) + } + + constexpr strides(strides const& l) : _base(l._base) {} - constexpr basic_fixed_rank_strides(basic_fixed_rank_strides && l ) noexcept + constexpr strides(strides && l ) noexcept : _base(std::move(l._base)) {} - constexpr basic_fixed_rank_strides(base_type const& l ) - : _base(l) - {} + ~strides() = default; - constexpr basic_fixed_rank_strides(base_type && l ) - : _base(std::move(l)) - {} - - ~basic_fixed_rank_strides() = default; - - - basic_fixed_rank_strides& operator=(basic_fixed_rank_strides const& other) - noexcept(std::is_nothrow_swappable_v) - { - basic_fixed_rank_strides temp(other); - swap (*this, temp); - return *this; - } - basic_fixed_rank_strides& operator=(basic_fixed_rank_strides && other) + strides& operator=(strides other) noexcept(std::is_nothrow_swappable_v) { swap (*this, other); return *this; } - friend void swap(basic_fixed_rank_strides& lhs, basic_fixed_rank_strides& rhs) + friend void swap(strides& lhs, strides& rhs) noexcept(std::is_nothrow_swappable_v) { - std::swap(lhs._base , rhs._base); + std::swap(lhs._base,rhs._base); } [[nodiscard]] inline @@ -191,7 +134,7 @@ class basic_fixed_rank_strides constexpr const_iterator end() const noexcept{ return _base.end(); } - + [[nodiscard]] inline constexpr base_type const& base() const noexcept{ return this->_base; @@ -212,10 +155,41 @@ class basic_fixed_rank_strides } private: - base_type _base; + base_type _base; + static constexpr std::size_t const _size = N; + + + [[nodiscard]] inline auto compute_strides( extents_type const& e) + { + using base_type = typename extents_type::base_type; + namespace ub = boost::numeric::ublas; + auto init = [](std::index_sequence){ return base_type{is...}; }; + + auto s = init(std::make_index_sequence{}); + + if (std::tuple_size_v == 0UL) + return s; + if (ub::is_vector(e) || ub::is_scalar(e)) + return s; + + if constexpr(std::is_same_v){ + std::transform(ub::begin(e), ub::end(e) - 1, s.begin(), s.begin() + 1, std::multiplies<>{}); + } + else { + std::transform(ub::rbegin(e), ub::rbegin(e) - 1, s.rbegin(), s.rbegin() + 1, std::multiplies<>{}); + } + + return s; + } }; -} // namespace boost::numeric::ublass +template struct is_strides ,L>> : std::true_type {}; +template struct is_dynamic ,L>> : std::true_type {}; +template struct is_static_rank ,L>> : std::true_type {}; + +} // namespace boost::numeric::ublas + +#endif #endif diff --git a/include/boost/numeric/ublas/tensor/function/conj.hpp b/include/boost/numeric/ublas/tensor/function/conj.hpp new 
file mode 100644 index 000000000..d30d08385 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/function/conj.hpp @@ -0,0 +1,81 @@ +// +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2020, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// + +#ifndef BOOST_NUMERIC_UBLAS_TENSOR_CONJ_HPP +#define BOOST_NUMERIC_UBLAS_TENSOR_CONJ_HPP + +#include +#include +#include + + +#include "../extents/extents_functions.hpp" +#include "../traits/basic_type_traits.hpp" +#include "../expression.hpp" +#include "../expression_evaluation.hpp" + +namespace boost::numeric::ublas +{ +template +class tensor_core; + +template +struct tensor_engine; + +} // namespace boost::numeric::ublas + +namespace boost::numeric::ublas +{ +/** @brief Computes the complex conjugate component of tensor elements within a tensor expression + * + * @param[in] lhs tensor expression + * @returns unary tensor expression + */ +template::value_type>, int > = 0 + > +auto conj(detail::tensor_expression< tensor_core, D > const& expr) +{ + return detail::make_unary_tensor_expression< tensor_core > (expr(), [] (auto const& l) { return std::conj( l ); } ); +} + +/** @brief Computes the complex conjugate component of tensor elements within a tensor expression + * + * @param[in] expr tensor expression + * @returns complex tensor + */ +template +auto conj(detail::tensor_expression const& expr) +{ + using tensor_type = T; + using value_type = typename tensor_type::value_type; + using complex_type = std::complex; + using layout_type = typename tensor_type::layout_type; + using container_type = typename tensor_type::container_type; + using extents_type = typename tensor_type::extents_type; + using return_container_type = typename container_traits::template rebind; + using return_tensor_type = tensor_core>; + + if( ublas::empty( detail::retrieve_extents( expr ) ) ){ + throw std::runtime_error("error in boost::numeric::ublas::conj: tensors should not be empty."); + } + + auto a = tensor_type( expr ); + auto c = return_tensor_type( a.extents() ); + + std::transform( a.begin(), a.end(), c.begin(), [](auto const& l){ return std::conj(l) ; } ); + + return c; +} + + +} // namespace boost::numeric::ublas + +#endif // BOOST_NUMERIC_UBLAS_TENSOR_PROD_DYNAMIC_HPP diff --git a/include/boost/numeric/ublas/tensor/function/imag.hpp b/include/boost/numeric/ublas/tensor/function/imag.hpp new file mode 100644 index 000000000..0058cb188 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/function/imag.hpp @@ -0,0 +1,82 @@ +// +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2020, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
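As a usage sketch for conj(): applied to a complex-valued tensor it yields the element-wise conjugate. The tensor engine spelling below mirrors the init helpers later in this patch; the umbrella header <boost/numeric/ublas/tensor.hpp> and the dynamic extents<> brace construction are assumptions, not taken verbatim from the patch.

#include <boost/numeric/ublas/tensor.hpp>
#include <complex>
#include <vector>

int main()
{
  namespace ublas = boost::numeric::ublas;
  using value_t  = std::complex<float>;
  using tensor_t = ublas::tensor_core<
    ublas::tensor_engine< ublas::extents<>, ublas::layout::first_order, std::vector<value_t> > >;

  auto a = tensor_t( ublas::extents<>{2,3} );  // 2x3 complex tensor, value-initialized
  auto c = tensor_t( ublas::conj(a) );         // materialize the element-wise conjugate
  (void)c;
  return 0;
}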
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// + +#ifndef BOOST_NUMERIC_UBLAS_TENSOR_IMAG_HPP +#define BOOST_NUMERIC_UBLAS_TENSOR_IMAG_HPP + +#include +#include +#include + + +#include "../extents/extents_functions.hpp" +#include "../traits/basic_type_traits.hpp" +#include "../expression.hpp" +#include "../expression_evaluation.hpp" + +namespace boost::numeric::ublas +{ +template +class tensor_core; + +template +struct tensor_engine; + +} // namespace boost::numeric::ublas + +namespace boost::numeric::ublas +{ + +/** @brief Extract the imaginary component of tensor elements within a tensor expression + * + * @param[in] lhs tensor expression + * @returns unary tensor expression + */ +template +auto imag(detail::tensor_expression const& lhs) { + return detail::make_unary_tensor_expression (lhs(), [] (auto const& l) { return std::imag( l ); } ); +} + + +/** @brief Extract the imag component of tensor elements within a tensor expression + * + * @param[in] lhs tensor expression + * @returns unary tensor expression + */ +template::value_type>, int > = 0 + > +auto imag(detail::tensor_expression< tensor_core< TE > ,D> const& expr) +{ + using tensor_type = tensor_core< TE >; + using complex_type = typename tensor_type::value_type; + using value_type = typename complex_type::value_type; + using layout_type = typename tensor_type::layout_type; + using container_type = typename tensor_type::container_type; + using extents_type = typename tensor_type::extents_type; + using return_container_type = typename container_traits::template rebind; + + using return_tensor_type = tensor_core>; + + if( ublas::empty( detail::retrieve_extents( expr ) ) ){ + throw std::runtime_error("error in boost::numeric::ublas::real: tensors should not be empty."); + } + + auto a = tensor_type( expr ); + auto c = return_tensor_type( a.extents() ); + + std::transform( a.begin(), a.end(), c.begin(), [](auto const& l){ return std::imag(l) ; } ); + + return c; +} + +} // namespace boost::numeric::ublas + +#endif // BOOST_NUMERIC_UBLAS_TENSOR_PROD_DYNAMIC_HPP diff --git a/include/boost/numeric/ublas/tensor/function/init.hpp b/include/boost/numeric/ublas/tensor/function/init.hpp new file mode 100644 index 000000000..3a19c3507 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/function/init.hpp @@ -0,0 +1,120 @@ +// +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// + +#ifndef BOOST_NUMERIC_UBLAS_TENSOR_FUNCTIONS_INIT_HPP +#define BOOST_NUMERIC_UBLAS_TENSOR_FUNCTIONS_INIT_HPP + +#include "../extents.hpp" +#include "../tensor.hpp" +#include "../concepts.hpp" + +//#include + + +namespace boost::numeric::ublas +{ + +template +struct init +{ + using container = std::vector; + using tensor = tensor_core, L, container>>; + + inline auto operator()(extents<> const& e) const + { + auto p = ublas::product(e); + return tensor(e,container(p,V{n})); + } + + + template + inline auto operator()(Ns ... ns) const + { + auto p = ( std::size_t(1) * ... 
* std::size_t(ns) ); + return tensor(extents<>{std::size_t(ns)...},container(p,V{n})); + } +}; + +template +using ones = init; + +template +using zeros = init; + + + +template +struct init_static_rank +{ + using container = std::vector; + + template + inline auto operator()(extents const& e) const + { + auto p = ublas::product(e); + using tensor = tensor_core, L, container>>; + + return tensor(e, container(p,V{k})); + } + + + template + inline auto operator()(Ns ... ns) const + { + constexpr auto n = sizeof...(ns); + auto p = ( std::size_t(1) * ... * std::size_t(ns) ); + using tensor = tensor_core, L, container>>; + + return tensor(extents{std::size_t(ns)...}, container(p,V{k})); + } +}; + +template +using ones_static_rank = init_static_rank; + +template +using zeros_static_rank = init_static_rank; + + +template +struct init_static +{ + template + struct inner; + + template + struct inner> + { + static constexpr auto n = sizeof...(is); + // NOLINTNEXTLINE(bugprone-integer-division) + static constexpr auto value = std::array{ V(k*(is+1)/(is+1)) ... }; + }; + + template + constexpr inline auto operator()(extents const& /**/) const + { + using extents_type = extents; + constexpr auto p = product_v; + constexpr auto c = inner>::value; + using tensor = tensor_core>>; + return tensor(c); + } +}; + + +template +using ones_static = init_static; + +template +using zeros_static = init_static; + + +} // namespace boost::numeric::ublas + +#endif // BOOST_NUMERIC_UBLAS_TENSOR_FUNCTIONS_ONES_HPP diff --git a/include/boost/numeric/ublas/tensor/function/inner_prod.hpp b/include/boost/numeric/ublas/tensor/function/inner_prod.hpp new file mode 100644 index 000000000..e82e2d10d --- /dev/null +++ b/include/boost/numeric/ublas/tensor/function/inner_prod.hpp @@ -0,0 +1,68 @@ +// +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2020, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// + +#ifndef BOOST_NUMERIC_UBLAS_TENSOR_INNER_HPP +#define BOOST_NUMERIC_UBLAS_TENSOR_INNER_HPP + +#include +#include + +#include "../extents.hpp" +#include "../multiplication.hpp" + + +namespace boost::numeric::ublas +{ +template +class tensor_core; +} // namespace boost::numeric::ublas + +namespace boost::numeric::ublas +{ + +/** @brief Computes the inner product of two tensors * + * Implements c = sum(A[i1,i2,...,ip] * B[i1,i2,...,jp]) + * + * @note calls inner function + * + * @param[in] a tensor object A + * @param[in] b tensor object B + * + * @returns a value type. 
+ */
+template
+inline decltype(auto) inner_prod(tensor_core< TE1 > const &a, tensor_core< TE2 > const &b)
+{
+  using value_type = typename tensor_core< TE1 >::value_type;
+
+  static_assert(
+    std::is_same_v::value_type>,
+    "error in boost::numeric::ublas::inner_prod(tensor_core< TE1 > const&, tensor_core< TensorEngine2 > const&): "
+    "Both tensors should have the same value_type"
+  );
+
+  if (a.rank() != b.rank())
+    throw std::length_error("error in boost::numeric::ublas::inner_prod: Rank of both the tensors must be the same.");
+
+  if (a.empty() || b.empty())
+    throw std::length_error("error in boost::numeric::ublas::inner_prod: Tensors should not be empty.");
+
+  //if (a.extents() != b.extents())
+  if (::operator!=(a.extents(),b.extents()))
+    throw std::length_error("error in boost::numeric::ublas::inner_prod: Tensor extents should be the same.");
+
+  return inner(a.rank(), a.extents().data(),
+               a.data(), a.strides().data(),
+               b.data(), b.strides().data(), value_type{0});
+}
+
+} // namespace boost::numeric::ublas
+
+#endif // BOOST_NUMERIC_UBLAS_TENSOR_INNER_HPP
diff --git a/include/boost/numeric/ublas/tensor/function/norm.hpp b/include/boost/numeric/ublas/tensor/function/norm.hpp
new file mode 100644
index 000000000..7f4922f49
--- /dev/null
+++ b/include/boost/numeric/ublas/tensor/function/norm.hpp
@@ -0,0 +1,60 @@
+//
+// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com
+// Copyright (c) 2020, Amit Singh, amitsingh19975@gmail.com
+//
+// Distributed under the Boost Software License, Version 1.0. (See
+// accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+//
+//
+
+#ifndef BOOST_NUMERIC_UBLAS_TENSOR_NORM_HPP
+#define BOOST_NUMERIC_UBLAS_TENSOR_NORM_HPP
+
+#include
+#include
+#include
+
+
+#include "../traits/basic_type_traits.hpp"
+
+namespace boost::numeric::ublas
+{
+template
+class tensor_core;
+} // namespace boost::numeric::ublas
+
+namespace boost::numeric::ublas
+{
+
+/**
+ *
+ * @brief Computes the Frobenius norm of a tensor
+ *
+ * @note Calls accumulate on the tensor.
+ *
+ * implements
+ * k = sqrt( sum_(i1,...,ip) A(i1,...,ip)^2 )
+ *
+ * @tparam V the data type of tensor
+ * @tparam F the format of tensor storage
+ * @tparam A the array_type of tensor
+ * @param a the tensor of rank p whose norm is computed.
+ * @return the Frobenius norm of the tensor.
+ */
+template
+inline auto norm(tensor_core< TE > const &a)
+{
+  using value_type = typename tensor_core< TE >::value_type;
+
+  if (a.empty()) {
+    throw std::runtime_error("Error in boost::numeric::ublas::norm: tensors should not be empty.");
+  }
+
+  return std::sqrt(accumulate(a.order(), a.extents().data(), a.data(), a.strides().data(), value_type{},
+                              [](auto const &l, auto const &r) { return l + r * r; }));
+}
+
+} // namespace boost::numeric::ublas
+
+#endif // BOOST_NUMERIC_UBLAS_TENSOR_NORM_HPP
diff --git a/include/boost/numeric/ublas/tensor/function/outer_prod.hpp b/include/boost/numeric/ublas/tensor/function/outer_prod.hpp
new file mode 100644
index 000000000..2adb6fef7
--- /dev/null
+++ b/include/boost/numeric/ublas/tensor/function/outer_prod.hpp
@@ -0,0 +1,283 @@
+//
+// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com
+// Copyright (c) 2020, Amit Singh, amitsingh19975@gmail.com
+//
+// Distributed under the Boost Software License, Version 1.0.
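A short end-to-end sketch tying the init helpers to the reductions above; the ones<> spelling and the umbrella header <boost/numeric/ublas/tensor.hpp> are assumptions based on init.hpp earlier in this patch.

#include <boost/numeric/ublas/tensor.hpp>
#include <iostream>

int main()
{
  namespace ublas = boost::numeric::ublas;

  auto a = ublas::ones<float>{}(3,4,2);   // 3x4x2 tensor filled with 1
  auto b = ublas::ones<float>{}(3,4,2);

  auto ip = ublas::inner_prod(a, b);      // 24 = sum over 3*4*2 products of 1*1
  auto nr = ublas::norm(a);               // sqrt(24)

  std::cout << ip << " " << nr << "\n";
  return 0;
}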
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// + +#ifndef BOOST_NUMERIC_UBLAS_TENSOR_OUTER_HPP +#define BOOST_NUMERIC_UBLAS_TENSOR_OUTER_HPP + +#include +#include + +#include "../extents.hpp" +#include "../multiplication.hpp" +#include "../type_traits.hpp" +#include "../tags.hpp" + + +namespace boost::numeric::ublas +{ + +template +struct tensor_engine; + +template +class tensor_core; + +} // namespace boost::numeric::ublas + +namespace boost::numeric::ublas +{ + + +namespace detail{ +/** Enables if E1 or E1 is dynamic extents with static rank + * + * extents< > & extents + * extents & extents< > + * extents< > & extents< > + * + */ +template< + class TEA, + class TEB, + class EA = typename tensor_core::extents_type, + class EB = typename tensor_core::extents_type + > +using enable_outer_if_one_extents_has_dynamic_rank = std::enable_if_t< + ( is_dynamic_rank_v || is_dynamic_rank_v) && + (!is_static_v || !is_static_v ) , bool >; + +} // namespace detail + +/** @brief Computes the outer product of two tensors + * + * Implements C[i1,...,ip,j1,...,jq] = A[i1,i2,...,ip] * B[j1,j2,...,jq] + * + * @note calls outer function + * + * @param[in] a tensor object A + * @param[in] b tensor object B + * + * @returns tensor object C with the same storage format F and allocator type A1 + */ +template = true > +inline auto outer_prod( tensor_core< TEA > const &a, tensor_core< TEB > const &b) +{ + using tensorA = tensor_core< TEA >; + using tensorB = tensor_core< TEB >; + using valueA = typename tensorA::value_type; + using extentsA = typename tensorA::extents_type; + + using valueB = typename tensorB::value_type; + using extentsB = typename tensorB::extents_type; + + using tensorC = std::conditional_t < is_dynamic_rank_v, tensorA, tensorB>; +// using valueC = typename tensorC::value_type; + using extentsC = typename tensorC::extents_type; + + static_assert( std::is_same_v ); + static_assert( is_dynamic_rank_v || is_dynamic_rank_v); + + if (a.empty() || b.empty()){ + throw std::runtime_error("Error in boost::numeric::ublas::outer_prod: tensors should not be empty."); + } + + auto const& na = a.extents(); + auto const& nb = b.extents(); + + auto nc_base = typename extentsC::base_type(ublas::size(na)+ublas::size(nb)); + auto nci = std::copy(ublas::begin(na),ublas::end(na), std::begin(nc_base)); + std::copy(ublas::begin(nb),ublas::end(nb), nci); + auto nc = extentsC(nc_base); + + auto c = tensorC( nc, valueA{} ); + + outer(c.data(), c.rank(), nc.data(), c.strides().data(), + a.data(), a.rank(), na.data(), a.strides().data(), + b.data(), b.rank(), nb.data(), b.strides().data()); + + return c; +} + + +namespace detail{ +/** Enables if extents E1, E1 + * + * both are dynamic extents with static rank + * + * extents & extents + * + */ +template< + class TEA, + class TEB, + class E1 = typename tensor_core::extents_type, + class E2 = typename tensor_core::extents_type + > +using enable_outer_if_both_extents_have_static_rank = std::enable_if_t< + ( is_static_rank_v && is_dynamic_v) && + ( is_static_rank_v && is_dynamic_v) , bool >; +} // namespace detail + +/** @brief Computes the outer product of two tensors + * + * Implements C[i1,...,ip,j1,...,jq] = A[i1,i2,...,ip] * B[j1,j2,...,jq] + * + * @note calls outer function + * + * @param[in] a tensor object A + * @param[in] b tensor object B + * + * @returns tensor object C with the same storage format F and allocator type A1 + */ +template = true > +inline auto outer_prod(tensor_core const &a, tensor_core const 
&b) +{ + using tensorA = tensor_core; + using valueA = typename tensorA::value_type; + using layoutA = typename tensorA::layout_type; + using extentsA = typename tensorA::extents_type; + using containerA = typename tensorA::container_type; + using resizableA_tag = typename tensorA::resizable_tag; + + using tensorB = tensor_core; + using valueB = typename tensorB::value_type; +// using layoutB = typename tensorB::layout_type; + using extentsB = typename tensorB::extents_type; + using resizableB_tag = typename tensorB::resizable_tag; + + static_assert(std::is_same_v); + static_assert(is_static_rank_v || is_static_rank_v); + + static_assert(std::is_same_v); + static_assert(std::is_same_v); + + if (a.empty() || b.empty()) + throw std::runtime_error("error in boost::numeric::ublas::outer_prod: tensors should not be empty."); + + auto const& na = a.extents(); + auto const& nb = b.extents(); + + constexpr auto sizeA = std::tuple_size_v; + constexpr auto sizeB = std::tuple_size_v; + + using extentsC = extents; + using tensorC = tensor_core>; + + auto nc_base = typename extentsC::base_type{}; + auto nci = std::copy(ublas::begin(na), ublas::end(na), std::begin(nc_base)); + std::copy(ublas::begin(nb),ublas::end(nb), nci); + auto nc = extentsC( nc_base ); + + auto c = tensorC( nc ); + + outer(c.data(), c.rank(), nc.data(), c.strides().data(), + a.data(), a.rank(), na.data(), a.strides().data(), + b.data(), b.rank(), nb.data(), b.strides().data()); + + return c; +} + + +namespace detail { + +// concat two static_stride_list togather +// @code using type = typename concat< static_stride_list, static_stride_list >::type @endcode +template +struct concat; + +template +struct concat< basic_static_extents, basic_static_extents > { + using type = basic_static_extents; +}; + +template +using concat_t = typename concat::type; + +} // namespace detail + +namespace detail { +/** Enables if extents E1, E1 + * + * both are dynamic extents with static rank + * + * extents & extents + * + */ +template< + class TEA, + class TEB, + class E1 = typename tensor_core::extents_type, + class E2 = typename tensor_core::extents_type + > +using enable_outer_if_both_extents_are_static = std::enable_if_t< + ( is_static_v && is_static_v) , bool>; + +} // namespace detail +/** @brief Computes the outer product of two tensors + * + * Implements C[i1,...,ip,j1,...,jq] = A[i1,i2,...,ip] * B[j1,j2,...,jq] + * + * @note calls outer function + * + * @param[in] a tensor object A + * @param[in] b tensor object B + * + * @returns tensor object C with the same storage format F and allocator type A1 + */ +template = true > +inline decltype(auto) outer_prod(tensor_core const &a, tensor_core const &b) +{ + using tensorA = tensor_core; + using valueA = typename tensorA::value_type; + using layoutA = typename tensorA::layout_type; + using extentsA = typename tensorA::extents_type; + using arrayA = typename tensorA::array_type; +// using resizableA_tag = typename tensorA::resizable_tag; + + using tensorB = tensor_core; + using valueB = typename tensorB::value_type; +// using layoutB = typename tensorB::layout_type; + using extentsB = typename tensorB::extents_type; +// using resizableB_tag = typename tensorB::resizable_tag; + + using extentsC = ublas::cat_t;// detail::concat_t; + using layoutC = layoutA; + using valueC = valueA; + using storageC = rebind_storage_size_t; + using tensorC = tensor_core>; + + static_assert(std::is_same_v); + static_assert(is_static_v || is_static_v); + + constexpr auto extentsA_size = std::tuple_size_v; + constexpr 
auto extentsB_size = std::tuple_size_v; + + + if (a.empty() || b.empty()) + throw std::runtime_error("error in boost::numeric::ublas::outer_prod: tensors should not be empty."); + + auto nc = extentsC{}; + + auto const& na = a.extents(); + auto const& nb = b.extents(); + + auto c = tensorC(valueC{}); + + outer(c.data(), c.rank(), data(nc), c.getStrides().data(), + a.data(), a.rank(), data(na), a.getStrides().data(), + b.data(), b.rank(), data(nb), b.getStrides().data()); + + return c; +} + + +} // namespace boost::numeric::ublas + +#endif // BOOST_NUMERIC_UBLAS_TENSOR_PROD_DYNAMIC_HPP diff --git a/include/boost/numeric/ublas/tensor/function/real.hpp b/include/boost/numeric/ublas/tensor/function/real.hpp new file mode 100644 index 000000000..302637776 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/function/real.hpp @@ -0,0 +1,80 @@ +// +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2021, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// + +#ifndef BOOST_NUMERIC_UBLAS_TENSOR_REAL_HPP +#define BOOST_NUMERIC_UBLAS_TENSOR_REAL_HPP + +#include +#include +#include + + +#include "../extents/extents_functions.hpp" +#include "../traits/basic_type_traits.hpp" +#include "../expression.hpp" +#include "../expression_evaluation.hpp" + +namespace boost::numeric::ublas +{ +template +class tensor_core; + +template +struct tensor_engine; +} // namespace boost::numeric::ublas + +namespace boost::numeric::ublas +{ + +/** @brief Extract the real component of tensor elements within a tensor expression + * + * @param[in] lhs tensor expression + * @returns unary tensor expression + */ +template +auto real(detail::tensor_expression const& expr) { + return detail::make_unary_tensor_expression (expr(), [] (auto const& l) { return std::real( l ); } ); +} + +/** @brief Extract the real component of tensor elements within a tensor expression + * + * @param[in] lhs tensor expression + * @returns unary tensor expression + */ +template::value_type>, int > = 0 + > +auto real(detail::tensor_expression< tensor_core< TE > ,D > const& expr) +{ + + using tensor_type = tensor_core< TE >; + using complex_type = typename tensor_type::value_type; + using value_type = typename complex_type::value_type; + using layout_type = typename tensor_type::layout_type; + using container_type = typename tensor_type::container_type; + using extents_type = typename tensor_type::extents_type; + using storage_type = typename container_traits::template rebind; + using return_tensor_engine = tensor_engine; + using return_tensor_type = tensor_core; + + if( ublas::empty ( detail::retrieve_extents( expr ) ) ) + throw std::runtime_error("error in boost::numeric::ublas::real: tensors should not be empty."); + + auto a = tensor_type( expr ); + auto c = return_tensor_type( a.extents() ); + + std::transform( a.begin(), a.end(), c.begin(), [](auto const& l){ return std::real(l) ; } ); + + return c; +} + +} // namespace boost::numeric::ublas + +#endif // BOOST_NUMERIC_UBLAS_TENSOR_PROD_DYNAMIC_HPP diff --git a/include/boost/numeric/ublas/tensor/function/reshape.hpp b/include/boost/numeric/ublas/tensor/function/reshape.hpp new file mode 100644 index 000000000..d27e59446 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/function/reshape.hpp @@ -0,0 +1,87 @@ +// +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2021, Amit Singh, 
amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + + +#ifndef BOOST_NUMERIC_UBLAS_TENSOR_RESHAPE_HPP +#define BOOST_NUMERIC_UBLAS_TENSOR_RESHAPE_HPP + +#include "../extents.hpp" +#include "../tensor.hpp" + +namespace boost::numeric::ublas{ + +/** Enables prod(ttt) if E1 or E1 is dynamic extents with static rank + * + * extents< > & extents + * extents & extents< > + * extents< > & extents< > + * + */ +template< + class TE, + class E = typename tensor_core< TE >::extents_type + > +using enable_reshape_if_shape_is_dynamic = std::enable_if_t< is_dynamic_v , bool >; + +/** @brief Reshapes the basic_tensor + * + * + * (1) @code auto b = a.reshape(extents{m,n,o}); @endcode or + * (2) @code auto b = a.reshape(extents{m,n,o},4); @endcode + * + * If the size of this smaller than the specified extents than + * default constructed (1) or specified (2) value is appended. + * + * @note rank of the basic_tensor might also change. + * + * @param e extents with which the basic_tensor is reshaped. + * @param v value which is appended if the basic_tensor is enlarged. + */ +template< class E, class D, + enable_reshape_if_shape_is_dynamic = true> +[[nodiscard]] constexpr auto reshape (tensor_core const& t, extents_base const& e, [[maybe_unused]] typename tensor_core::value_type v = {}) +{ + using from_engine_type = E; + using from_tensor_type = tensor_core; +// using from_extents_type = typename from_tensor_type::extents_type; + using from_container_type = typename from_tensor_type::container_type; + using from_layout_type = typename from_tensor_type::layout_type; + + using to_extents_type = D; + using to_engine_type = tensor_engine; + using to_tensor_type = tensor_core; + + auto const& efrom = t.extents(); + auto const& eto = e(); + + if( ::operator==(efrom,eto) ) + return t; + + auto const to_size = product(eto); + auto const from_size = product(efrom); + + + auto r = to_tensor_type(eto); + const auto m = std::min(to_size,from_size); + std::copy(t.begin() , t.begin()+m ,r.begin() ); + + if(m < to_size){ + const auto n = to_size - m; + std::fill_n(r.begin()+m,n,v); + } + + return r; +} + +} // namespace boost::numeric::ublas + +#endif // BOOST_NUMERIC_UBLAS_TENSOR_PROD_COMMON_HPP diff --git a/include/boost/numeric/ublas/tensor/function/tensor_times_matrix.hpp b/include/boost/numeric/ublas/tensor/function/tensor_times_matrix.hpp new file mode 100644 index 000000000..9e5c9acff --- /dev/null +++ b/include/boost/numeric/ublas/tensor/function/tensor_times_matrix.hpp @@ -0,0 +1,256 @@ +// +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2021, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// + +#ifndef BOOST_NUMERIC_UBLAS_TENSOR_TTM_HPP +#define BOOST_NUMERIC_UBLAS_TENSOR_TTM_HPP + +#include +#include +#include + +#include "../extents.hpp" +#include "../type_traits.hpp" +#include "../tags.hpp" +#include "../tensor.hpp" + + +/** @brief Computes the m-mode tensor-times-matrix product + * + * Implements C[i1,...,im-1,j,im+1,...,ip] = A[i1,i2,...,ip] * B[j,im] + * + * @note calls ublas::ttm + * + * @param[in] a tensor object A with order p + * @param[in] b vector object B + * @param[in] m contraction dimension with 1 <= m <= p + * + * @returns tensor object C with order p, the same storage format and allocator type as A + */ + +//namespace boost::numeric::ublas +//{ + +//template +//struct tensor_engine; + +//template +//class tensor_core; + +//template +//class matrix; + +//} // namespace boost::numeric::ublas + +namespace boost::numeric::ublas +{ + +namespace detail { +template< class TE, class E = typename tensor_core::extents_type > +using enable_ttm_if_extent_is_modifiable = std::enable_if_t, bool>; +} // namespace detail + +template ::value_type, + typename L = typename tensor_core::layout_type, + detail::enable_ttm_if_extent_is_modifiable = true > +inline decltype(auto) prod( tensor_core< TE > const &a, matrix const &b, const std::size_t m) +{ + using tensor_type = tensor_core< TE >; + using extents_type = typename tensor_type::extents_type; + using layout_type = typename tensor_type::layout_type; + using resizeable_tag = typename tensor_type::resizable_tag; + + static_assert(std::is_same_v ); + static_assert(is_dynamic_v); + + auto const p = a.rank(); + auto const& na = a.extents(); + auto nb = extents<2>{std::size_t(b.size1()), std::size_t(b.size2())}; + + assert( p != 0 ); + assert( p == ublas::size(na)); + + if( m == 0 ) throw std::length_error("Error in boost::numeric::ublas::ttm: contraction mode must be greater than zero."); + if( p < m ) throw std::length_error("Error in boost::numeric::ublas::ttm: tensor order must be greater than or equal to the specified mode."); + if(na[m-1]!=nb[1]) throw std::invalid_argument("Error in boost::numeric::ublas::ttm: 2nd extent of B and m-th extent of A must be equal."); + + + auto nc_base = na.base(); + auto wb = ublas::to_strides(nb,layout_type{}); + nc_base[m-1] = nb[0]; + auto nc = extents_type(nc_base); + auto c = tensor_type(nc); + + assert( std::equal(begin(na) , begin(na)+m-1, begin(nc) ) ); + assert( std::equal(begin(na)+m, end (na), begin(nc)+m) ); + assert( nc[m-1] == nb[0] ); + + auto const* bb = &(b(0, 0)); + ttm(m, p, + c.data(), c.extents().data(), c.strides().data(), + a.data(), a.extents().data(), a.strides().data(), + bb, nb.data(), wb.data()); + + return c; +} + + +/** @brief Computes the m-mode tensor-times-matrix product + * + * Implements C[i1,...,im-1,j,im+1,...,ip] = A[i1,i2,...,ip] * B[j,im] + * + * @note calls ublas::ttm + * + * @tparam M contraction dimension with 1 <= M <= p + * @tparam N is a non contracting dimension + * @tparam TE TensorEngine is used for the tensor + * + * @param[in] a tensor object A with order p + * @param[in] b vector object B + * + * @returns tensor object C with order p, the same storage format and allocator type as A + */ + +namespace detail { +template< class TE, class E = typename tensor_core< TE >::extents_type > +using enable_ttm_if_extent_is_not_resizable = + std::enable_if_t && is_dynamic_v, bool>; +} // namespace detail + +template ::value_type, + typename L = typename 
tensor_core< TE >::layout_type, + detail::enable_ttm_if_extent_is_not_resizable> +inline decltype(auto) prod(tensor_core const &a, matrix const &b) +{ + using tensor_type = tensor_core; + using extents_type = typename tensor_type::extents_type; + using layout_type = typename tensor_type::layout_type; + using resizeable_tag = typename tensor_type::resizable_tag; + + static_assert(std::is_same_v ); + static_assert(is_dynamic_v); + + constexpr auto p = std::tuple_size_v; + + auto const& na = a.extents(); + auto nb = extents<2>{std::size_t(b.size1()), std::size_t(b.size2())}; + + static_assert( p != 0 ); + static_assert( p == a.rank()); + static_assert( m != 0); + static_assert( p < m); + + if(na[m-1]!=nb[1]) throw std::invalid_argument("Error in boost::numeric::ublas::ttm: 2nd extent of B and m-th extent of A must be equal."); + + auto nc_base = na.base(); + auto wb = ublas::to_strides(nb,layout_type{}); + + std::get(nc_base) = std::get<0>(nb.base()); + + auto nc = extents_type(nc_base); + auto c = tensor_type(nc); + + assert(std::equal(begin(na) , begin(na)+m-1, begin(nc) )); + assert(std::equal(begin(na)+m, end (na), begin(nc)+m)); + assert(nc[m-1] == nb[0]); + + auto bbdata = &(b(0, 0)); + + auto const& wa = a.strides(); + auto const& wc = c.strides(); + + ttm(m, p, + c.data(), nc.data(), wc.data(), + a.data(), na.data(), wa.data(), + bbdata , nb.data(), wb.data()); + + return c; +} + + +//namespace detail { +//template< +// class TEL, +// class TER, +// class EL = typename TEL::extents_type, +// class ER = typename TER::extents_type +// > +//using enable_ttm_if_extent_is_static = +// std::enable_if_t && is_static_v, bool>; +//} // namespace detail + +//template +//inline decltype(auto) prod( tensor_core const& a, tensor_core const &b) +//{ +// using tensorA = tensor_core; +// using extentsA = typename tensorA::extents_type; +// using layout = typename tensorA::layout_type; +// using resizeable_tag = typename tensorA::resizable_tag; + +// static_assert(std::is_same_v ); +// static_assert(is_static_v); + +// constexpr auto p = size_v; + + +// auto const& na = a.extents(); +// auto const& nb = b.extents(); + +// static_assert( p != 0 ); +// static_assert( p == a.rank()); +// static_assert( m != 0); +// static_assert( p < m); + +// static_assert(get_v != get_v); + +// if(na[m-1]!=nb[1]) throw std::invalid_argument("Error in boost::numeric::ublas::ttm: 2nd extent of B and m-th extent of A must be equal."); + +// auto nc_base = na.base(); +// auto wb = ublas::to_strides(nb,layout{}); + +// std::get(nc_base) = std::get<0>(nb.base()); + +// auto nc = extents_type(nc_base); +// auto c = tensor_type(nc); + +// assert(std::equal(na.begin() , na.begin()+m-1, nc.begin())); +// assert(std::equal(na.begin()+m, na.end, nc.begin())); +// assert(nc[m-1] == nb[0]); + +// auto bbdata = &(b(0, 0)); + +// auto const& wa = a.strides(); +// auto const& wc = c.strides(); + +// ttm(m, p, +// c.data(), nc.data(), wc.data(), +// a.data(), na.data(), wa.data(), +// bbdata , nb.data(), wb.data()); + +// return c; +//} + + + +//using value_type = typename tensor_type::value_type; +//using container_type = typename tensor_type::container_type; +//using return_extents_type = std::decay_t; +//using return_container_type = rebind_storage_size_t; +//using return_tensor_type = tensor_core>; + +//auto c = return_tensor_type(value_type{}); + +} // namespace boost::numeric::ublas + +#endif // BOOST_NUMERIC_UBLAS_TENSOR_NORM_HPP diff --git a/include/boost/numeric/ublas/tensor/function/tensor_times_tensor.hpp 
b/include/boost/numeric/ublas/tensor/function/tensor_times_tensor.hpp new file mode 100644 index 000000000..aed107711 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/function/tensor_times_tensor.hpp @@ -0,0 +1,337 @@ +// +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2021, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// + +#ifndef BOOST_NUMERIC_UBLAS_TENSOR_TTT_HPP +#define BOOST_NUMERIC_UBLAS_TENSOR_TTT_HPP + +#include +#include +#include + +#include "../extents.hpp" +#include "../tags.hpp" +#include "../tensor.hpp" +#include "../type_traits.hpp" + + +namespace boost::numeric::ublas +{ + +template +struct tensor_engine; + +template +class tensor_core; + +template +class matrix; + +} // namespace boost::numeric::ublas + +namespace boost::numeric::ublas +{ + +namespace detail +{ +/** Enables prod(ttt) if E1 or E1 is dynamic extents with static rank + * + * extents< > & extents + * extents & extents< > + * extents< > & extents< > + * + */ +template< + class TEA, + class TEB, + class EA = typename tensor_core< TEA >::extents_type, + class EB = typename tensor_core< TEB >::extents_type + > +using enable_ttt_if_one_extents_has_dynamic_rank = std::enable_if_t< + ( is_dynamic_rank_v || is_dynamic_rank_v) && + (!is_static_v || !is_static_v ) , bool >; +} // namespace detail +/** @brief Computes the q-mode tensor-times-tensor product + * + * Implements C[i1,...,ir,j1,...,js] = sum( A[i1,...,ir+q] * B[j1,...,js+q] ) + * + * @note calls ublas::ttt + * + * na[phia[x]] = nb[phib[x]] for 1 <= x <= q + * + * @param[in] a left-hand side tensor with order r+q + * @param[in] b right-hand side tensor with order s+q + * @param[in] phia one-based permutation tuple of length q for the first input tensor a can be of type std::vector or std::array + * @param[in] phib one-based permutation tuple of length q for the second input tensor b can be of type std::vector or std::array + * @result tensor with order r+s + */ +template = true > +inline decltype(auto) prod(tensor_core< TEA > const &a, + tensor_core< TEB > const &b, + std::vector const &phia, + std::vector const &phib) +{ + using tensorA_type = tensor_core< TEA >; + using tensorB_type = tensor_core< TEB >; + using extentsA_type = typename tensorA_type::extents_type; + using extentsB_type = typename tensorB_type::extents_type; + using layoutA_type = typename tensorA_type::layout_type; + using container_type = typename tensorA_type::container_type; + using resizableA_tag = typename tensorA_type::resizable_tag; + using resizableB_tag = typename tensorB_type::resizable_tag; + using valueA_type = typename tensorA_type::value_type; + using valueB_type = typename tensorB_type::value_type; + + static_assert(std::is_same_v); + static_assert(std::is_same_v); + static_assert(std::is_same_v); + + static_assert(is_dynamic_rank_v || is_dynamic_rank_v); + + + auto const pa = a.rank(); + auto const pb = b.rank(); + + auto const q = std::size_t{phia.size()}; + + if (pa == 0ul) throw std::runtime_error("error in ublas::prod(ttt): order of left-hand side tensor must be greater than 0."); + if (pb == 0ul) throw std::runtime_error("error in ublas::prod(ttt): order of right-hand side tensor must be greater than 0."); + if (pa < q) throw std::runtime_error("error in ublas::prod(ttt): number of contraction dimensions cannot be greater than the order of the left-hand side tensor."); + if (pb < 
q) throw std::runtime_error("error in ublas::prod(ttt): number of contraction dimensions cannot be greater than the order of the right-hand side tensor."); + if (q != phib.size()) throw std::runtime_error("error in ublas::prod(ttt): permutation tuples must have the same length."); + if (pa < phia.size()) throw std::runtime_error("error in ublas::prod(ttt): permutation tuple for the left-hand side tensor cannot be greater than the corresponding order."); + if (pb < phib.size()) throw std::runtime_error("error in ublas::prod(ttt): permutation tuple for the right-hand side tensor cannot be greater than the corresponding order."); + + auto const &na = a.extents(); + auto const &nb = b.extents(); + + for (auto i = 0ul; i < q; ++i) + if (na.at(phia.at(i) - 1) != nb.at(phib.at(i) - 1)) + throw std::runtime_error("error in ublas::prod: permutations of the extents are not correct."); + + auto const r = pa - q; + auto const s = pb - q; + + auto phia1 = std::vector(pa); + auto phib1 = std::vector(pb); + std::iota(phia1.begin(), phia1.end(), std::size_t(1)); + std::iota(phib1.begin(), phib1.end(), std::size_t(1)); + + using dynamic_extents = std::conditional_t, extentsA_type, extentsB_type>; + using extents_base = typename dynamic_extents::base_type; + auto const size = std::size_t(pa+pb-2*q); + auto nc_base = extents_base (std::max(size,std::size_t{2}),std::size_t{1}); + + //for (auto i = 0ul; i < phia.size(); ++i) + for (auto p : phia) + *std::remove(phia1.begin(), phia1.end(), p) = p; + //phia1.erase( std::remove(phia1.begin(), phia1.end(), phia.at(i)), phia1.end() ) ; + + for (auto i = 0ul; i < r; ++i) + nc_base[i] = na[phia1[i] - 1]; + + //for (auto i = 0ul; i < phib.size(); ++i) + for (auto p : phib) + *std::remove(phib1.begin(), phib1.end(), p) = p; + //phib1.erase( std::remove(phib1.begin(), phib1.end(), phia.at(i)), phib1.end() ) ; + + for (auto i = 0ul; i < s; ++i) + nc_base[r + i] = nb[phib1[i] - 1]; + + assert(phia1.size() == pa); + assert(phib1.size() == pb); + + auto nc = dynamic_extents(nc_base); + + using return_tensor_type = tensor_core>; + auto c = return_tensor_type( nc, valueA_type{} ); + + ttt(pa, pb, q, + phia1.data(), phib1.data(), + c.data(), c.extents().data(), c.strides().data(), + a.data(), a.extents().data(), a.strides().data(), + b.data(), b.extents().data(), b.strides().data()); + + return c; +} + + + +/** @brief Computes the q-mode tensor-times-tensor product + * + * Implements C[i1,...,ir,j1,...,js] = sum( A[i1,...,ir+q] * B[j1,...,js+q] ) + * + * @note calls ublas::ttt + * + * na[phi[x]] = nb[phi[x]] for 1 <= x <= q + * + * @param[in] a left-hand side tensor with order r+q + * @param[in] b right-hand side tensor with order s+q + * @param[in] phi one-based permutation tuple of length q for both input + * tensors can be of type std::vector or std::array + * @result tensor with order r+s + */ +template = true > +inline decltype(auto) prod(tensor_core const &a, + tensor_core const &b, + std::vector const &phi) +{ + return prod(a, b, phi, phi); +} + + + +namespace detail +{ + +/** Enables if extents E1, E1 are dynamic extents with static rank + * + * extents & extents + * + */ +template< + class TE1, + class TE2, + class E1 = typename tensor_core< TE1 >::extents_type, + class E2 = typename tensor_core< TE2 >::extents_type + > +using enable_ttt_if_extents_have_static_rank = std::enable_if_t< + (is_static_rank_v && is_dynamic_v) && + (is_static_rank_v && is_dynamic_v) , bool>; + +} // namespace detail + +/** @brief Computes the q-mode tensor-times-tensor product + * + * 
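+ * A usage sketch (illustrative; A and B are assumed static-rank tensors whose
+ * first two extents agree, which this overload alone does not mandate):
+ * @code
+ * auto phia = std::array<std::size_t,2>{1,2};
+ * auto phib = std::array<std::size_t,2>{1,2};
+ * auto C = prod(A, B, phia, phib); // C has order rank(A)+rank(B)-4
+ * @endcode
+ *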
Implements C[i1,...,ir,j1,...,js] = sum( A[i1,...,ir+q] * B[j1,...,js+q] ) + * + * @note calls ublas::ttt + * + * na[phia[x]] = nb[phib[x]] for 1 <= x <= q + * + * @param[in] a left-hand side tensor with order r+q + * @param[in] b right-hand side tensor with order s+q + * @param[in] phia one-based permutation tuple of length q for the first input tensor a + * @param[in] phib one-based permutation tuple of length q for the second input tensor b + * @result tensor with order r+s + */ +template = true > +inline auto prod(tensor_core const &a, + tensor_core const &b, + std::array const &phia, + std::array const &phib) +{ + using tensorA_type = tensor_core; + using tensorB_type = tensor_core; + using extentsA_type = typename tensorA_type::extents_type; + using extentsB_type = typename tensorB_type::extents_type; + using valueA_type = typename tensorA_type::value_type; + using valueB_type = typename tensorB_type::value_type; + using layout_type = typename tensorA_type::layout_type; + using container_type = typename tensorA_type::container_type; + using resizeableA_tag = typename tensorA_type::resizable_tag; + using resizeableB_tag = typename tensorB_type::resizable_tag; + + + static_assert(std::is_same_v); + static_assert(std::is_same_v); + static_assert(std::is_same_v); + + constexpr auto q = Q; + constexpr auto pa = std::tuple_size_v; + constexpr auto pb = std::tuple_size_v; + + static_assert(pa != 0); + static_assert(pb != 0); + static_assert(pa >= q); + static_assert(pb >= q); + +// if (pa < phia.size()) throw std::runtime_error("error in ublas::prod: permutation tuple for the left-hand side tensor cannot be greater than the corresponding order."); +// if (pb < phib.size()) throw std::runtime_error("error in ublas::prod: permutation tuple for the right-hand side tensor cannot be greater than the corresponding order."); + + auto const &na = a.extents(); + auto const &nb = b.extents(); + + for (auto i = 0ul; i < q; ++i) + if (na.at(phia.at(i) - 1) != nb.at(phib.at(i) - 1)) + throw std::runtime_error("error in ublas::prod: permutations of the extents are not correct."); + + constexpr auto r = pa - q; + constexpr auto s = pb - q; + + auto phia1 = std::array{}; + auto phib1 = std::array{}; + std::iota(phia1.begin(), phia1.end(),std::size_t(1)); + std::iota(phib1.begin(), phib1.end(),std::size_t(1)); + + constexpr auto const msz = std::max(std::size_t(r+s), std::size_t(2)); + using return_extents_type = extents; + auto nc_base = std::array{}; + + for (auto i = 0ul; i < phia.size(); ++i) + *std::remove(phia1.begin(), phia1.end(), phia.at(i)) = phia.at(i); + //phia1.erase( std::remove(phia1.begin(), phia1.end(), phia.at(i)), phia1.end() ) ; + + for (auto i = 0ul; i < phib.size(); ++i) + *std::remove(phib1.begin(), phib1.end(), phib.at(i)) = phib.at(i); + //phib1.erase( std::remove(phib1.begin(), phib1.end(), phia.at(i)), phib1.end() ) ; + + for (auto i = 0ul; i < r; ++i) + nc_base[i] = na[phia1[i] - 1]; + + for (auto i = 0ul; i < s; ++i) + nc_base[r+i] = nb[phib1[i] - 1]; + + auto nc = return_extents_type(nc_base); + + using return_tensor_type = tensor_core>; + + auto c = return_tensor_type( nc ); + + ttt(pa, pb, q, + phia1.data(), phib1.data(), + c.data(), c.extents().data(), c.strides().data(), + a.data(), a.extents().data(), a.strides().data(), + b.data(), b.extents().data(), b.strides().data()); + + return c; +} + + +/** @brief Computes the q-mode tensor-times-tensor product + * + * Implements C[i1,...,ir,j1,...,js] = sum( A[i1,...,ir+q] * B[j1,...,js+q] ) + * + * @note calls ublas::ttt + * + * 
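+ * A sketch of this symmetric form (illustrative; A and B are assumed tensors
+ * whose first extents agree): a single permutation tuple contracts the same
+ * modes of both arguments,
+ * @code
+ * auto phi = std::array<std::size_t,1>{1};
+ * auto C = prod(A, B, phi); // equivalent to prod(A, B, phi, phi)
+ * @endcode
+ *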
na[phi[x]] = nb[phi[x]] for 1 <= x <= q + * + * @param[in] a left-hand side tensor with order r+q + * @param[in] b right-hand side tensor with order s+q + * @param[in] phi one-based permutation tuple of length q for both input + * tensors can be of type std::vector or std::array + * @result tensor with order r+s + */ +template * = nullptr > +inline decltype(auto) prod(tensor_core const &a, + tensor_core const &b, + std::array const &phi) +{ + return prod(a, b, phi, phi); +} + + + + +} // namespace boost::numeric::ublas + +#endif // BOOST_NUMERIC_UBLAS_TENSOR_NORM_HPP diff --git a/include/boost/numeric/ublas/tensor/function/tensor_times_vector.hpp b/include/boost/numeric/ublas/tensor/function/tensor_times_vector.hpp new file mode 100644 index 000000000..82c9b3c41 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/function/tensor_times_vector.hpp @@ -0,0 +1,240 @@ +// +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2021, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// + +#ifndef BOOST_NUMERIC_UBLAS_TENSOR_TTV_HPP +#define BOOST_NUMERIC_UBLAS_TENSOR_TTV_HPP + +#include +#include +#include + +#include "../extents.hpp" +#include "../type_traits.hpp" +#include "../tags.hpp" + +namespace boost::numeric::ublas +{ + + +template +struct tensor_engine; + +template +class tensor_core; + +//template +//class vector; + +} // namespace boost::numeric::ublas + +namespace boost::numeric::ublas +{ + +namespace detail { + +/** Enables if extent E is dynamic with dynamic rank: extents< > */ +template< + class TE, + class E = typename tensor_core::extents_type + > +using enable_ttv_if_extent_has_dynamic_rank = std::enable_if_t, bool>; + +} // namespace detail + + +/** @brief Computes the m-mode tensor-times-vector product + * + * Implements C[i1,...,im-1,im+1,...,ip] = A[i1,i2,...,ip] * b[im] + * + * @note calls ublas::ttv + * + * @param[in] m contraction dimension with 1 <= m <= p + * @param[in] a tensor object A with order p + * @param[in] b vector object B + * + * @returns tensor object C with order p-1, the same storage format and allocator type as A + */ +template ::value, + detail::enable_ttv_if_extent_has_dynamic_rank = true > +inline decltype(auto) prod( tensor_core< TE > const &a, vector const &b, const std::size_t m) +{ + + using tensor = tensor_core< TE >; + using shape = typename tensor::extents_type; + using value = typename tensor::value_type; + using layout = typename tensor::layout_type; + using resize_tag = typename tensor::resizable_tag; + + auto const p = a.rank(); + + static_assert(std::is_same_v); + static_assert(is_dynamic_v); + + if (m == 0ul) throw std::length_error("error in boost::numeric::ublas::prod(ttv): contraction mode must be greater than zero."); + if (p < m) throw std::length_error("error in boost::numeric::ublas::prod(ttv): rank of tensor must be greater than or equal to the contraction mode."); + if (a.empty()) throw std::length_error("error in boost::numeric::ublas::prod(ttv): first argument tensor should not be empty."); + if (b.empty()) throw std::length_error("error in boost::numeric::ublas::prod(ttv): second argument vector should not be empty."); + + auto const& na = a.extents(); + auto nb = extents<2>{std::size_t(b.size()),std::size_t(1ul)}; + auto wb = ublas::to_strides(nb,layout{} ); + + auto const sz = std::max( std::size_t(ublas::size(na)-1u), std::size_t(2) ); + auto nc_base 
= typename shape::base_type(sz,1); + + for (auto i = 0ul, j = 0ul; i < p; ++i) + if (i != m - 1) + nc_base[j++] = na.at(i); + + auto nc = shape(nc_base); + + + auto c = tensor( nc, value{} ); + auto const* bb = &(b(0)); + ttv(m, p, + c.data(), c.extents().data(), c.strides().data(), + a.data(), a.extents().data(), a.strides().data(), + bb, nb.data(), wb.data()); + return c; +} + + +namespace detail { +/** Enables if extent E is dynamic with static rank: extents */ +template< + class TE, + class E = typename tensor_core< TE >::extents_type + > +using enable_ttv_if_extent_is_dynamic_with_static_rank = + std::enable_if_t< is_static_rank_v< E > && is_dynamic_v< E >, bool>; + +} // namespace detail + + +/** @brief Computes the m-mode tensor-times-vector product + * + * Implements C[i1,...,im-1,im+1,...,ip] = A[i1,i2,...,ip] * b[im] + * + * @note calls ublas::ttv + * + * @param[in] m contraction dimension with 1 <= m <= p + * @param[in] a tensor object A with order p + * @param[in] b vector object B + * + * @returns tensor object C with order p-1, the same storage format and allocator type as A + */ +template ::value, + detail::enable_ttv_if_extent_is_dynamic_with_static_rank = true + > +inline auto prod( tensor_core< TE > const &a, vector const &b, const std::size_t m) +{ + using tensor = tensor_core< TE >; + using shape = typename tensor::extents_type; + using container = typename tensor::container_type; + using layout = typename tensor::layout_type; + using resizeable_tag = typename tensor::resizable_tag; + + constexpr auto p = std::tuple_size_v; + constexpr auto sz = std::max(std::size_t(std::tuple_size_v-1U),std::size_t(2)); + + using shape_b = ublas::extents<2>; + using shape_c = ublas::extents; + using tensor_c = tensor_core>; + + static_assert(std::is_same_v); + + if (m == 0ul) throw std::length_error("error in boost::numeric::ublas::prod(ttv): contraction mode must be greater than zero."); + if (p < m) throw std::length_error("error in boost::numeric::ublas::prod(ttv): rank of tensor must be greater than or equal to the modus."); + if (a.empty()) throw std::length_error("error in boost::numeric::ublas::prod(ttv): first argument tensor should not be empty."); + if (b.empty()) throw std::length_error("error in boost::numeric::ublas::prod(ttv): second argument vector should not be empty."); + + auto const& na = a.extents(); + + auto nc_base = typename shape_c::base_type{}; + std::fill(nc_base.begin(), nc_base.end(),std::size_t(1)); + for (auto i = 0ul, j = 0ul; i < p; ++i) + if (i != m - 1) + nc_base[j++] = na.at(i); + + auto nc = shape_c(std::move(nc_base)); + auto nb = shape_b{b.size(),1UL}; + auto wb = ublas::to_strides(nb,layout{}); + auto c = tensor_c( std::move(nc) ); + auto const* bb = &(b(0)); + + ttv(m, p, + c.data(), c.extents().data(), c.strides().data(), + a.data(), a.extents().data(), a.strides().data(), + bb, nb.data(), wb.data() ); + return c; +} + + + +/** @brief Computes the m-mode tensor-times-vector product + * + * Implements C[i1,...,im-1,im+1,...,ip] = A[i1,i2,...,ip] * b[im] + * + * @note calls ublas::ttv + * + * @tparam M contraction dimension with 1 <= m <= p + * @param[in] a tensor object A with order p + * @param[in] b vector object B + * + * @returns tensor object C with order p-1, the same storage format and allocator type as A + */ + +template ::value> +inline auto prod( tensor_core< TE > const &a, vector const &b) +{ + using tensor = tensor_core< TE >; + using container = typename tensor::container; + using shape = typename tensor::extents; + using layout = 
typename tensor::layout; + using shape_b = extents<2>; + using shape_c = remove_element_t; + using container_c = rebind_storage_size_t; + using tensor_c = tensor_core>; + + static_assert( m != 0ul ); + static_assert(std::tuple_size_v != 0 ); + static_assert(std::tuple_size_v >= m ); + + constexpr auto p = std::tuple_size_v; + + if (a.empty()) throw std::length_error("error in boost::numeric::ublas::prod(ttv): first argument tensor should not be empty."); + if (b.empty()) throw std::length_error("error in boost::numeric::ublas::prod(ttv): second argument vector should not be empty."); + + auto const& na = a.extents(); + + auto nc = shape_c{}; + auto nb = shape_b{std::size_t(b.size()),std::size_t(1)}; + + auto c = tensor_c{}; + auto const* bb = &(b(0)); + + auto const& wa = a.strides(); + auto const& wc = c.strides(); + auto wb = ublas::to_strides(nb,layout{}); + + ttv(m, p, + c.data(), nc.data(), wc.data(), + a.data(), na.data(), wa.data(), + bb, nb.data(), wb.data()); + + return c; +} + +} // namespace boost::numeric::ublas + +#endif // BOOST_NUMERIC_UBLAS_TENSOR_NORM_HPP diff --git a/include/boost/numeric/ublas/tensor/function/trans.hpp b/include/boost/numeric/ublas/tensor/function/trans.hpp new file mode 100644 index 000000000..e328b4327 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/function/trans.hpp @@ -0,0 +1,78 @@ +// +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2021, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// + +#ifndef BOOST_NUMERIC_UBLAS_TENSOR_TRANS_HPP +#define BOOST_NUMERIC_UBLAS_TENSOR_TRANS_HPP + +#include +#include +#include + +#include "../extents.hpp" +#include "../traits/basic_type_traits.hpp" +#include "../multiplication.hpp" + + +namespace boost::numeric::ublas +{ +template +class tensor_core; +} // namespace boost::numeric::ublas + +namespace boost::numeric::ublas +{ + +/** @brief Transposes a tensor according to a permutation tuple + * + * Implements C[tau[i1],tau[i2]...,tau[ip]] = A[i1,i2,...,ip] + * + * @note calls trans function + * + * @param[in] a tensor object of rank p + * @param[in] tau one-based permutation tuple of length p + * @returns a transposed tensor object with the same storage format F and allocator type A + */ +template > +inline decltype(auto) trans(tensor_core< TensorEngine > const &a, PermuType const &tau) +{ + + using tensor_type = tensor_core< TensorEngine >; + using extents_type = typename tensor_type::extents_type; + + static_assert( is_dynamic_v< extents_type > ); + + auto const p = a.rank(); + auto const &na = a.extents(); + typename extents_type::base_type nc; + + if constexpr( is_dynamic_rank_v ){ + nc.resize(p); + } + + for (auto i = 0u; i < p; ++i){ + nc.at(tau.at(i) - 1) = na.at(i); + } + + auto c = tensor_type( extents_type( std::move(nc) ) ); + + if (a.empty()){ + return c; + } + + trans(a.rank(), a.extents().data(), tau.data(), + c.data(), c.strides().data(), + a.data(), a.strides().data()); + + return c; +} + +} // namespace boost::numeric::ublas + +#endif // BOOST_NUMERIC_UBLAS_TENSOR_PROD_DYNAMIC_HPP diff --git a/include/boost/numeric/ublas/tensor/functions.hpp b/include/boost/numeric/ublas/tensor/functions.hpp index d77b6a9a4..8029d59a1 100644 --- a/include/boost/numeric/ublas/tensor/functions.hpp +++ b/include/boost/numeric/ublas/tensor/functions.hpp @@ -1,1101 +1,27 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, 
cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2020, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// - - -#ifndef _BOOST_UBLAS_TENSOR_FUNCTIONS_HPP_ -#define _BOOST_UBLAS_TENSOR_FUNCTIONS_HPP_ - -#include -#include - -namespace boost::numeric::ublas{ - - template class tensor_core; - -} // namespace boost::numeric::ublas - - -namespace boost::numeric::ublas -{ - namespace detail{ - - template - struct is_complex : std::false_type{}; - - template - struct is_complex< std::complex > : std::true_type{}; - - template - inline static constexpr bool is_complex_v = is_complex::value; - - /// To check if the type is the std::array or not. - /// Can be extented by providing specialization. - /// Point to Remember: C-Style arrays are not supported. - template - struct is_bounded_array : std::false_type{}; - - template - inline static constexpr bool is_bounded_array_v = is_bounded_array::value; - - template - struct is_bounded_array> : std::true_type{}; - - /// Gives the extent of rank one std::array. - /// Similar to is_bounded_array, it can also be - /// extented using specialization. - /// Custom Type should have similar APIs to - /// std::array. - /// Point to Remember: C-Style arrays are not supported. - template - struct extent_of_rank_one_array; - - template - struct extent_of_rank_one_array> - : std::integral_constant - {}; - - template - inline static constexpr bool extent_of_rank_one_array_v = extent_of_rank_one_array::value; - - } // namespace detail - - /** @brief Computes the m-mode tensor-times-vector product - * - * Implements C[i1,...,im-1,im+1,...,ip] = A[i1,i2,...,ip] * b[im] - * - * @note calls ublas::ttv - * - * @param[in] m contraction dimension with 1 <= m <= p - * @param[in] a tensor object A with order p - * @param[in] b vector object B - * - * @returns tensor object C with order p-1, the same storage format and allocator type as A - */ - template - inline decltype(auto) prod( tensor_core< TensorEngine > const &a, - vector::value_type, A> const &b, - const std::size_t m) - { - using tensor_type = tensor_core< TensorEngine >; - using extents_type = typename tensor_type::extents_type; - using value_type = typename tensor_type::value_type; - using array_type = typename tensor_type::array_type; - using size_type = typename extents_type::size_type; - using layout_type = typename tensor_type::layout_type; - - auto const p = a.rank(); - - static_assert( - std::is_same_v< - typename tensor_core::resizable_tag, - storage_resizable_container_tag - >, - "error in boost::numeric::ublas::prod(tensor_core const&, vector const& ): " - "tensor container should be resizable" - ); - - static_assert( - is_dynamic_v, - "error in boost::numeric::ublas::prod(tensor_core const&, vector const& ): " - "extents type should be dynamic" - ); - - if (m == 0ul) - throw std::length_error( - "error in boost::numeric::ublas::prod(ttv): " - "contraction mode must be greater than zero."); - - if (p < m) - throw std::length_error( - "error in boost::numeric::ublas::prod(ttv): rank of tensor must be " - "greater than or equal to the modus."); - - if (a.empty()) - throw std::length_error( - "error in boost::numeric::ublas::prod(ttv): first " - 
"argument tensor should not be empty."); - - if (b.empty()) - throw std::length_error( - "error in boost::numeric::ublas::prod(ttv): second " - "argument vector should not be empty."); - - using extents_value_type = typename extents_type::value_type; - - auto a_extents = a.extents(); - - auto extents_result = [&a_extents](){ - using size_type = typename extents_type::size_type; - if constexpr( is_static_rank_v ){ - // To disable the warning for unused variable; - (void)a_extents; - constexpr size_type esz = extents_type::_size - 1u; - constexpr auto sz = std::max( esz, size_type(2) ); - auto ret = extents< sz >(); - ret.fill(1u); - return ret; - }else{ - using extents_base_type = typename extents_type::base_type; - auto const sz = std::max( a_extents.size() - 1, size_type(2) ); - auto arr = extents_base_type(sz,1); - return extents_type{ std::move(arr) } ; - } - }; - - auto nc = extents_result(); - auto nb = std::vector{b.size(), extents_value_type(1)}; - - for (auto i = size_type(0), j = size_type(0); i < p; ++i) - if (i != m - 1) - nc[j++] = a_extents.at(i); - - using c_extents_type = std::decay_t< decltype(nc) >; - - using t_engine = tensor_engine< - c_extents_type, - layout_type, - strides, - array_type - >; - - auto c = tensor_core( nc, value_type{} ); - auto bb = &(b(0)); - ttv(m, p, - c.data(), c.extents().data(), c.strides().data(), - a.data(), a.extents().data(), a.strides().data(), - bb, nb.data(), nb.data()); - return c; - } - - /** @brief Computes the m-mode tensor-times-matrix product - * - * Implements C[i1,...,im-1,j,im+1,...,ip] = A[i1,i2,...,ip] * B[j,im] - * - * @note calls ublas::ttm - * - * @param[in] a tensor object A with order p - * @param[in] b vector object B - * @param[in] m contraction dimension with 1 <= m <= p - * - * @returns tensor object C with order p, the same storage format and allocator type as A - */ - template - inline decltype(auto) prod( tensor_core< TensorEngine > const &a, - matrix::value_type, typename tensor_core< TensorEngine >::layout_type , A> const &b, - const std::size_t m) - { - - using tensor_type = tensor_core< TensorEngine >; - using extents_type = typename tensor_type::extents_type; - using value_type = typename tensor_type::value_type; - using layout_type = typename tensor_type::layout_type; - using array_type = typename tensor_type::array_type; - using dynamic_strides_type = basic_strides; - - - static_assert( - std::is_same_v< - typename tensor_core::resizable_tag, - storage_resizable_container_tag - >, - "error in boost::numeric::ublas::prod(tensor_core const&, matrix const& ): " - "tensor container should be resizable" - ); - - static_assert( - is_dynamic_v, - "error in boost::numeric::ublas::prod(tensor_core const&, matrix const& ): " - "extents type should be dynamic" - ); - - auto const p = a.rank(); - - if (m == 0ul) - throw std::length_error( - "error in boost::numeric::ublas::prod(ttm): " - "contraction mode must be greater than zero."); - - if (p < m || m > a.extents().size()) - throw std::length_error( - "error in boost::numeric::ublas::prod(ttm): rank " - "of the tensor must be greater equal the modus."); - - if (a.empty()) - throw std::length_error( - "error in boost::numeric::ublas::prod(ttm): first " - "argument tensor should not be empty."); - - if (b.size1() * b.size2() == 0ul) - throw std::length_error( - "error in boost::numeric::ublas::prod(ttm): second " - "argument matrix should not be empty."); - - auto nc = a.extents(); - auto nb = extents<>{b.size1(), b.size2()}; - - auto wb = dynamic_strides_type(nb); - - 
nc[m - 1] = nb[0]; - - using c_extents_type = std::decay_t< decltype(nc) >; - - using t_engine = tensor_engine< - c_extents_type, - layout_type, - strides, - array_type - >; - - auto c = tensor_core(nc, value_type{}); - - auto bb = &(b(0, 0)); - ttm(m, p, - c.data(), c.extents().data(), c.strides().data(), - a.data(), a.extents().data(), a.strides().data(), - bb, nb.data(), wb.data()); - - return c; - } - - /** @brief Computes the q-mode tensor-times-tensor product - * - * Implements C[i1,...,ir,j1,...,js] = sum( A[i1,...,ir+q] * B[j1,...,js+q] ) - * - * @note calls ublas::ttt - * - * na[phia[x]] = nb[phib[x]] for 1 <= x <= q - * - * @param[in] a left-hand side tensor with order r+q - * @param[in] b right-hand side tensor with order s+q - * @param[in] phia one-based permutation tuple of length q for the first - * input tensor a can be of type std::vector or std::array - * @param[in] phib one-based permutation tuple of length q for the second - * input tensor b can be of type std::vector or std::array - * @result tensor with order r+s - */ - template , - std::enable_if_t< - !( is_static_v::extents_type> || - is_static_v::extents_type> ) - ,int> = 0 - > - inline decltype(auto) prod(tensor_core< TensorEngine1 > const &a, tensor_core< TensorEngine2 > const &b, - PermuType const &phia, PermuType const &phib) - { - using tensor_type = tensor_core< TensorEngine1 >; - using extents_type = typename tensor_type::extents_type; - using value_type = typename tensor_type::value_type; - using layout_type = typename tensor_type::layout_type; - using extents_size_type = typename extents_type::size_type; - using array_type = typename tensor_type::array_type; - - static_assert( - std::is_same_v< - typename tensor_core::resizable_tag, - typename tensor_core::resizable_tag - > && - std::is_same_v< - typename tensor_core::resizable_tag, - storage_resizable_container_tag - >, - "error in boost::numeric::ublas::prod(tensor_core const&, tensor_core const&, " - "PermuType const&, PermuType const& ): " - "Both the tensor storage should have the same type of storage and both should be resizable" - ); - - static_assert( - std::is_same_v::value_type>, - "error in boost::numeric::ublas::prod(tensor_core< TensorEngine1 > const&, tensor_core< TensorEngine2 > const&, " - "PermuType const&, PermuType const&): " - "Both the tensor should have the same value_type" - ); - - auto const pa = a.rank(); - auto const pb = b.rank(); - - auto const q = static_cast(phia.size()); - - if (pa == 0ul) - throw std::runtime_error("error in ublas::prod: order of left-hand side tensor must be greater than 0."); - if (pb == 0ul) - throw std::runtime_error("error in ublas::prod: order of right-hand side tensor must be greater than 0."); - if (pa < q) - throw std::runtime_error("error in ublas::prod: number of contraction dimensions cannot be greater than the order of the left-hand side tensor."); - if (pb < q) - throw std::runtime_error("error in ublas::prod: number of contraction dimensions cannot be greater than the order of the right-hand side tensor."); - - if (q != phib.size()) - throw std::runtime_error("error in ublas::prod: permutation tuples must have the same length."); - - if (pa < phia.size()) - throw std::runtime_error("error in ublas::prod: permutation tuple for the left-hand side tensor cannot be greater than the corresponding order."); - if (pb < phib.size()) - throw std::runtime_error("error in ublas::prod: permutation tuple for the right-hand side tensor cannot be greater than the corresponding order."); - - auto const &na = 
a.extents(); - auto const &nb = b.extents(); - - for (auto i = 0ul; i < q; ++i) - if (na.at(phia.at(i) - 1) != nb.at(phib.at(i) - 1)) - throw std::runtime_error("error in ublas::prod: permutations of the extents are not correct."); - - std::size_t const r = pa - q; - std::size_t const s = pb - q; - - std::vector phia1(pa); - std::vector phib1(pb); - std::iota(phia1.begin(), phia1.end(), 1ul); - std::iota(phib1.begin(), phib1.end(), 1ul); - - auto extents_result = [&e1 = na, &e2 = nb, &a1 = phia, &a2 = phib](){ - using lextents_type = std::decay_t< decltype(e1) >; - using rextents_type = std::decay_t< decltype(e2) >; - using array_type = std::decay_t< decltype(a1) >; - if constexpr( - detail::is_bounded_array_v && - is_static_rank_v && - is_static_rank_v - ){ - constexpr auto const N = detail::extent_of_rank_one_array_v; - constexpr auto const sz = lextents_type::_size + rextents_type::_size - 2 * N; - auto res = extents(); - res.fill(1u); - return res; - }else{ - extents_size_type const size = ( e1.size() + e2.size() ) - ( a1.size() + a2.size() ); - using extents_base_type = typename extents<>::base_type; - auto arr = extents_base_type( std::max(size, extents_size_type(2)), 1u ); - return extents<>(std::move(arr)); - } - }; - - auto nc = extents_result(); - - for (auto i = 0ul; i < phia.size(); ++i) - *std::remove(phia1.begin(), phia1.end(), phia.at(i)) = phia.at(i); - - //phia1.erase( std::remove(phia1.begin(), phia1.end(), phia.at(i)), phia1.end() ) ; - - for (auto i = 0ul; i < r; ++i) - nc[i] = na[phia1[i] - 1]; - - for (auto i = 0ul; i < phib.size(); ++i) - *std::remove(phib1.begin(), phib1.end(), phib.at(i)) = phib.at(i); - //phib1.erase( std::remove(phib1.begin(), phib1.end(), phia.at(i)), phib1.end() ) ; - - for (auto i = 0ul; i < s; ++i) - nc[r + i] = nb[phib1[i] - 1]; - - // std::copy( phib.begin(), phib.end(), phib1.end() ); - // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay) - assert(phia1.size() == pa); - // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay) - assert(phib1.size() == pb); - - using c_extents_type = std::decay_t< decltype(nc) >; - - using t_engine = tensor_engine< - c_extents_type, - layout_type, - strides, - array_type - >; - - auto c = tensor_core( nc, value_type{} ); - - ttt(pa, pb, q, - phia1.data(), phib1.data(), - c.data(), c.extents().data(), c.strides().data(), - a.data(), a.extents().data(), a.strides().data(), - b.data(), b.extents().data(), b.strides().data()); - - return c; - } - - // template - // auto operator*( tensor_index const& lhs, tensor_index - // const& rhs) - - /** @brief Computes the q-mode tensor-times-tensor product - * - * Implements C[i1,...,ir,j1,...,js] = sum( A[i1,...,ir+q] * B[j1,...,js+q] ) - * - * @note calls ublas::ttt - * - * na[phi[x]] = nb[phi[x]] for 1 <= x <= q - * - * @param[in] a left-hand side tensor with order r+q - * @param[in] b right-hand side tensor with order s+q - * @param[in] phi one-based permutation tuple of length q for bot input - * tensors can be of type std::vector or std::array - * @result tensor with order r+s - */ - template > - inline decltype(auto) prod( - tensor_core< TensorEngine1 > const &a, - tensor_core< TensorEngine2 > const &b, - PermuType const &phi) - { - return prod(a, b, phi, phi); - } - - /** @brief Computes the inner product of two tensors * - * Implements c = sum(A[i1,i2,...,ip] * B[i1,i2,...,jp]) - * - * @note calls inner function - * - * @param[in] a tensor object A - * @param[in] b tensor object B - * - * @returns a value type. 
- */ - template - inline decltype(auto) inner_prod(tensor_core< TensorEngine1 > const &a, tensor_core< TensorEngine2 > const &b) - { - - - using value_type = typename tensor_core< TensorEngine1 >::value_type; - - static_assert( - std::is_same_v::value_type>, - "error in boost::numeric::ublas::inner_prod(tensor_core< TensorEngine1 > const&, tensor_core< TensorEngine2 > const&): " - "Both the tensor should have the same value_type" - ); - - if (a.rank() != b.rank()) - throw std::length_error("error in boost::numeric::ublas::inner_prod: Rank of both the tensors must be the same."); - - if (a.empty() || b.empty()) - throw std::length_error("error in boost::numeric::ublas::inner_prod: Tensors should not be empty."); - - if (a.extents() != b.extents()) - throw std::length_error("error in boost::numeric::ublas::inner_prod: Tensor extents should be the same."); - - return inner(a.rank(), a.extents().data(), - a.data(), a.strides().data(), - b.data(), b.strides().data(), value_type{0}); - } - - /** @brief Computes the outer product of two tensors - * - * Implements C[i1,...,ip,j1,...,jq] = A[i1,i2,...,ip] * B[j1,j2,...,jq] - * - * @note calls outer function - * - * @param[in] a tensor object A - * @param[in] b tensor object B - * - * @returns tensor object C with the same storage format F and allocator type A1 - */ - template ::extents_type> || - is_static_v::extents_type> ) - ,int> = 0 - > - inline decltype(auto) outer_prod(tensor_core< TensorEngine1 > const &a, tensor_core< TensorEngine2 > const &b) - { - using tensor_type = tensor_core< TensorEngine1 >; - using value_type = typename tensor_type::value_type; - using layout_type = typename tensor_type::layout_type; - using array_type = typename tensor_type::array_type; - - static_assert( - std::is_same_v< - typename tensor_core::resizable_tag, - typename tensor_core::resizable_tag - > && - std::is_same_v< - typename tensor_core::resizable_tag, - storage_resizable_container_tag - >, - "error in boost::numeric::ublas::outer_prod(tensor_core const&, tensor_core const&): " - "Both the tensor storage should have the same type of storage and both should be resizable" - ); - - static_assert( - std::is_same_v::value_type>, - "error in boost::numeric::ublas::outer_prod(tensor_core< TensorEngine1 > const&, tensor_core< TensorEngine2 > const&): " - "Both the tensor should have the same value_type" - ); - - if (a.empty() || b.empty()) - throw std::runtime_error( - "error in boost::numeric::ublas::outer_prod: " - "tensors should not be empty."); - - auto extents_result = [&e1 = a.extents(), &e2 = b.extents()](){ - using lextents_type = std::decay_t< decltype(e1) >; - using rextents_type = std::decay_t< decltype(e2) >; - - if constexpr( is_static_rank_v && is_static_rank_v ){ - return extents< lextents_type::_size + rextents_type::_size >{}; - }else { - using extents_base_type = typename extents<>::base_type; - auto arr = extents_base_type( e1.size() + e2.size(), 1 ); - return extents<>{std::move(arr)}; - } - }; - - auto nc = extents_result(); - - auto a_extents = a.extents(); - auto b_extents = b.extents(); - - for(auto i = 0u; i < a.rank(); ++i) - nc.at(i) = a_extents.at(i); - - for(auto i = 0u; i < b.rank(); ++i) - nc.at(a.rank()+i) = b_extents.at(i); - - using c_extents_type = std::decay_t< decltype(nc) >; - - using t_engine = tensor_engine< - c_extents_type, - layout_type, - strides, - array_type - >; - - auto c = tensor_core( nc, value_type{} ); - - outer(c.data(), c.rank(), c.extents().data(), c.strides().data(), - a.data(), a.rank(), a_extents.data(), 
a.strides().data(), - b.data(), b.rank(), b_extents.data(), b.strides().data()); - - return c; - } - - /** @brief Transposes a tensor according to a permutation tuple - * - * Implements C[tau[i1],tau[i2]...,tau[ip]] = A[i1,i2,...,ip] - * - * @note calls trans function - * - * @param[in] a tensor object of rank p - * @param[in] tau one-based permutation tuple of length p - * @returns a transposed tensor object with the same storage format F and allocator type A - */ - template > - inline decltype(auto) trans(tensor_core< TensorEngine > const &a, PermuType const &tau) - { - - using tensor_type = tensor_core< TensorEngine >; - using layout_type = typename tensor_type::layout_type; - using array_type = typename tensor_type::array_type; - using extents_type = typename tensor_type::extents_type; - - static_assert( - is_dynamic_v< extents_type > , - "error in boost::numeric::ublas::trans(tensor_core< TensorEngine > const &a, " - "PermuType const &tau): " - "Tensor should have dynamic extents" - ); - - using t_engine = tensor_engine< - extents_type, - layout_type, - strides, - array_type - >; - - auto const p = a.rank(); - auto const &na = a.extents(); - typename extents_type::base_type nc; - - if constexpr( is_dynamic_rank_v ){ - nc.resize(p); - } - - for (auto i = 0u; i < p; ++i) - nc.at(tau.at(i) - 1) = na.at(i); - - auto c = tensor_core( extents_type( std::move(nc) ) ); - - if (a.empty()) - return c; - - trans(a.rank(), a.extents().data(), tau.data(), - c.data(), c.strides().data(), - a.data(), a.strides().data()); - - return c; - } - /** - * - * @brief Computes the frobenius nor of a tensor - * - * @note Calls accumulate on the tensor. - * - * implements - * k = sqrt( sum_(i1,...,ip) A(i1,...,ip)^2 ) - * - * @tparam V the data type of tensor - * @tparam F the format of tensor storage - * @tparam A the array_type of tensor - * @param a the tensor whose norm is expected of rank p. - * @return the frobenius norm of a tensor. 
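A brief usage sketch for trans and norm, under the same assumptions as the sketch above (the `tensor_dynamic` alias and its extents-plus-value constructor are assumptions, not guaranteed spellings):

    #include <boost/numeric/ublas/tensor.hpp>
    #include <cstddef>
    #include <vector>
    namespace ublas = boost::numeric::ublas;

    int main()
    {
      // all-ones tensor with extents {2,3,4} (assumed constructor)
      auto A = ublas::tensor_dynamic<float>(ublas::extents<>{2, 3, 4}, 1.0F);

      // one-based permutation tau: extent na[i] moves to position tau[i]-1,
      // so the result has extents {3,4,2}
      auto At = ublas::trans(A, std::vector<std::size_t>{3, 1, 2});

      // Frobenius norm of an all-ones 2x3x4 tensor is sqrt(24)
      auto nrm = ublas::norm(A);
      return (nrm > 0.0F && !At.empty()) ? 0 : 1;
    }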
- */ - template - inline decltype(auto) norm(tensor_core< TensorEngine > const &a) - { - using tensor_type = tensor_core< TensorEngine >; - using value_type = typename tensor_type::value_type; - - static_assert(std::is_default_constructible::value, - "Value type of tensor must be default construct able in order " - "to call boost::numeric::ublas::norm"); - - if (a.empty()) - { - throw std::runtime_error( - "error in boost::numeric::ublas::norm: tensors should not be empty."); - } - return std::sqrt(accumulate(a.order(), a.extents().data(), a.data(), a.strides().data(), value_type{}, - [](auto const &l, auto const &r) { return l + r * r; })); - } - - - /** @brief Computes the complex conjugate component of tensor elements within a tensor expression - * - * @param[in] lhs tensor expression - * @returns unary tensor expression - */ - template::value_type>, int > = 0 - > - auto conj(detail::tensor_expression< tensor_core, D > const& expr) - { - return detail::make_unary_tensor_expression< tensor_core > (expr(), [] (auto const& l) { return std::conj( l ); } ); - } - - /** @brief Computes the complex conjugate component of tensor elements within a tensor expression - * - * @param[in] expr tensor expression - * @returns complex tensor - */ - template - auto conj(detail::tensor_expression const& expr) - { - using old_tensor_type = T; - using value_type = typename old_tensor_type::value_type; - using layout_type = typename old_tensor_type::layout_type; - using array_type = typename old_tensor_type::array_type; - using extents_type = typename old_tensor_type::extents_type; - - using complex_type = std::complex; - using storage_traits_t = storage_traits; - - using t_engine = tensor_engine< - extents_type, - layout_type, - strides, - typename storage_traits_t::template rebind - >; - - using tensor_type = tensor_core; - - if( detail::retrieve_extents( expr ).empty() ) - throw std::runtime_error("error in boost::numeric::ublas::conj: tensors should not be empty."); - - auto a = old_tensor_type( expr ); - auto c = tensor_type( a.extents() ); - - std::transform( a.begin(), a.end(), c.begin(), [](auto const& l){ return std::conj(l) ; } ); - - return c; - } - - /** @brief Extract the real component of tensor elements within a tensor expression - * - * @param[in] lhs tensor expression - * @returns unary tensor expression - */ - template - auto real(detail::tensor_expression const& expr) { - return detail::make_unary_tensor_expression (expr(), [] (auto const& l) { return std::real( l ); } ); - } - - /** @brief Extract the real component of tensor elements within a tensor expression - * - * @param[in] lhs tensor expression - * @returns unary tensor expression - */ - template::value_type>, int > = 0 - > - auto real(detail::tensor_expression< tensor_core< TensorEngine > ,D > const& expr) - { - - using old_tensor_type = tensor_core< TensorEngine >; - using complex_type = typename old_tensor_type::value_type; - using value_type = typename complex_type::value_type; - using layout_type = typename old_tensor_type::layout_type; - using array_type = typename old_tensor_type::array_type; - using extents_type = typename old_tensor_type::extents_type; - using storage_traits_t = storage_traits; - - using t_engine = tensor_engine< - extents_type, - layout_type, - strides, - typename storage_traits_t::template rebind - >; - - using tensor_type = tensor_core; - - if( detail::retrieve_extents( expr ).empty() ) - throw std::runtime_error("error in boost::numeric::ublas::real: tensors should not be empty."); - - auto a = 
old_tensor_type( expr ); - auto c = tensor_type( a.extents() ); - - std::transform( a.begin(), a.end(), c.begin(), [](auto const& l){ return std::real(l) ; } ); - - return c; - } - - - /** @brief Extract the imaginary component of tensor elements within a tensor expression - * - * @param[in] lhs tensor expression - * @returns unary tensor expression - */ - template - auto imag(detail::tensor_expression const& lhs) { - return detail::make_unary_tensor_expression (lhs(), [] (auto const& l) { return std::imag( l ); } ); - } - - - /** @brief Extract the imag component of tensor elements within a tensor expression - * - * @param[in] lhs tensor expression - * @returns unary tensor expression - */ - template::value_type>, int > = 0 - > - auto imag(detail::tensor_expression< tensor_core< TensorEngine > ,D> const& expr) - { - using old_tensor_type = tensor_core< TensorEngine >; - using complex_type = typename old_tensor_type::value_type; - using value_type = typename complex_type::value_type; - using layout_type = typename old_tensor_type::layout_type; - using array_type = typename old_tensor_type::array_type; - using extents_type = typename old_tensor_type::extents_type; - using storage_traits_t = storage_traits; - - using t_engine = tensor_engine< - extents_type, - layout_type, - strides, - typename storage_traits_t::template rebind - >; - - using tensor_type = tensor_core; - - if( detail::retrieve_extents( expr ).empty() ) - throw std::runtime_error("error in boost::numeric::ublas::real: tensors should not be empty."); - - auto a = old_tensor_type( expr ); - auto c = tensor_type( a.extents() ); - - std::transform( a.begin(), a.end(), c.begin(), [](auto const& l){ return std::imag(l) ; } ); - - return c; - } - -} - -// static functions -namespace boost::numeric::ublas -{ - - namespace detail{ - - template - inline - constexpr auto extents_result_tensor_times_vector( - [[maybe_unused]] basic_static_extents e, - [[maybe_unused]] basic_static_extents te1, - [[maybe_unused]] basic_static_extents te2) - { - return basic_static_extents{}; - } - - template - inline - constexpr auto extents_result_tensor_times_vector( - [[maybe_unused]] basic_static_extents e1, - [[maybe_unused]] basic_static_extents e2, - [[maybe_unused]] basic_static_extents e3 = basic_static_extents{}) - { - if constexpr(I != M - 1){ - return extents_result_tensor_times_vector - ( basic_static_extents{}, basic_static_extents{}, basic_static_extents{} ); - }else{ - return extents_result_tensor_times_vector - ( basic_static_extents{}, basic_static_extents{}, basic_static_extents{} ); - } - } - - - template - inline - constexpr auto extents_result_tensor_times_vector(basic_static_extents const& e) - { - using size_type = typename basic_static_extents::size_type; - auto ones = typename impl::make_sequence_of_ones_t< T, std::max( size_type(2), sizeof...(E) ) >::extents_type{}; - return extents_result_tensor_times_vector(e, ones); - } - - template - inline - constexpr auto static_extents_set_at( - [[maybe_unused]] basic_static_extents const& e1, - [[maybe_unused]] basic_static_extents e2 = basic_static_extents{} - ){ - static_assert( I < sizeof...(E) + 1, "boost::numeric::ublas::detail::static_extents_set_at(): out of bound"); - if constexpr( sizeof...(E) == 0 ){ - if constexpr( I == 0 ){ - return basic_static_extents{}; - }else{ - return basic_static_extents{}; - } - }else{ - if constexpr(I == 0){ - return basic_static_extents{}; - }else{ - return static_extents_set_at( basic_static_extents{}, basic_static_extents{} ); - } - } - } - - } // 
namespace detail - - /** @brief Computes the m-mode tensor-times-vector product - * - * Implements C[i1,...,im-1,im+1,...,ip] = A[i1,i2,...,ip] * b[im] - * - * @note calls ublas::ttv - * - * @tparam M contraction dimension with 1 <= m <= p - * @param[in] a tensor object A with order p - * @param[in] b vector object B - * - * @returns tensor object C with order p-1, the same storage format and allocator type as A - */ - template - inline decltype(auto) prod(tensor_core< TensorType > const &a - , vector::value_type, A> const &b) - { - using tensor_type = tensor_core< TensorType >; - using array_type = typename tensor_type::array_type; - using extents_type = typename tensor_type::extents_type; - using value_type = typename tensor_type::value_type; - using layout_type = typename tensor_type::layout_type; - - auto const p = std::size_t(a.rank()); - - static_assert( M != 0ul, - "error in boost::numeric::ublas::prod(ttv): " - "contraction mode must be greater than zero."); - - static_assert( extents_type::_size >= M, - "error in boost::numeric::ublas::prod(ttv): rank of tensor must be " - "greater than or equal to the modus."); - - static_assert(extents_type::_size != 0, - "error in boost::numeric::ublas::prod(ttv): first " - "argument tensor should not be empty."); - - if (b.size() == 0ul) - throw std::length_error( - "error in boost::numeric::ublas::prod(ttv): second " - "argument vector should not be empty."); - - using extents_value_type = typename extents_type::value_type; - - auto nc = detail::extents_result_tensor_times_vector(a.extents()); - auto nb = std::vector{b.size(), extents_value_type(1)}; - using c_extents_type = std::decay_t; - - using t_engine = tensor_engine< - c_extents_type, - layout_type, - strides, - rebind_storage_size_t - >; - - auto c = t_engine(value_type{}); - auto bb = &(b(0)); - - auto& a_static_extents = a.extents().base(); - auto& c_static_extents = c.extents().base(); - - auto& a_static_strides = a.strides().base(); - auto& c_static_strides = c.strides().base(); - - ttv(M, p, - c.data(), c_static_extents.data(), c_static_strides.data(), - a.data(), a_static_extents.data(), a_static_strides.data(), - bb, nb.data(), nb.data()); - - return c; - } - - /** @brief Computes the m-mode tensor-times-matrix product - * - * Implements C[i1,...,im-1,j,im+1,...,ip] = A[i1,i2,...,ip] * B[j,im] - * - * @note calls ublas::ttm - * - * @tparam M contraction dimension with 1 <= M <= p - * @tparam MatrixDimension is a non contracting dimension - * @param[in] a tensor object A with order p - * @param[in] b vector object B - * - * @returns tensor object C with order p, the same storage format and allocator type as A - */ - template - inline decltype(auto) prod(tensor_core< TensorType > const &a, - matrix::value_type, typename tensor_core< TensorType >::layout_type, A> const &b) - { - using tensor_type = tensor_core< TensorType >; - using extents_type = typename tensor_type::extents_type; - using layout_type = typename tensor_type::layout_type; - using value_type = typename tensor_type::value_type; - using array_type = typename tensor_type::array_type; - using dynamic_strides_type = strides_t, layout_type>; - - auto const p = a.rank(); - - static_assert(M != 0ul, - "error in boost::numeric::ublas::prod(ttm): " - "contraction mode must be greater than zero."); - - static_assert( extents_type::_size >= M , - "error in boost::numeric::ublas::prod(ttm): rank " - "of the tensor must be greater equal the modus."); - - static_assert( extents_type::_size, - "error in 
boost::numeric::ublas::prod(ttm): first " - "argument tensor should not be empty."); - - if (b.size1() * b.size2() == 0ul) - throw std::length_error( - "error in boost::numeric::ublas::prod(ttm): second " - "argument matrix should not be empty."); - - auto nc = detail::static_extents_set_at< M - 1, MatrixDimension >( a.extents() ); - auto nb = extents<>{b.size1(), b.size2()}; - - auto wb = dynamic_strides_type(nb); - - using c_extents_type = std::decay_t; - - using t_engine = tensor_engine< - c_extents_type, - layout_type, - strides, - rebind_storage_size_t - >; - auto c = t_engine(value_type{}); - - auto bb = &(b(0, 0)); - - auto& a_static_extents = a.extents().base(); - auto& c_static_extents = c.extents().base(); - - auto& a_static_strides = a.strides().base(); - auto& c_static_strides = c.strides().base(); - ttm(M, p, - c.data(), c_static_extents.data(), c_static_strides.data(), - a.data(), a_static_extents.data(), a_static_strides.data(), - bb, nb.data(), wb.data()); - - return c; - } - - /** @brief Computes the outer product of two tensors - * - * Implements C[i1,...,ip,j1,...,jq] = A[i1,i2,...,ip] * B[j1,j2,...,jq] - * - * @note calls outer function - * - * @param[in] a tensor object A - * @param[in] b tensor object B - * - * @returns tensor object C with the same storage format F and allocator type A1 - */ - template ::extents_type > && - is_static_v< typename tensor_core< TensorEngine2 >::extents_type > - ,int> = 0 - > - inline decltype(auto) outer_prod(tensor_core< TensorEngine1 > const &a, tensor_core< TensorEngine2 > const &b) - { - if (a.empty() || b.empty()) - throw std::runtime_error( - "error in boost::numeric::ublas::outer_prod: " - "tensors should not be empty."); - - using extents_type1 = std::decay_t< decltype(a.extents()) >; - using extents_type2 = std::decay_t< decltype(b.extents()) >; - using array_type = typename tensor_core< TensorEngine1 >::array_type; - using value_type = typename tensor_core< TensorEngine1 >::value_type; - using layout_type = typename tensor_core< TensorEngine1 >::layout_type; - - static_assert( - std::is_same_v::value_type>, - "error in boost::numeric::ublas::outer_prod(tensor_core< TensorEngine1 > const&, tensor_core< TensorEngine2 > const&): " - "Both the tensor should have the same value_type" - ); - - auto nc = detail::impl::concat_t{}; - - auto a_extents = a.extents(); - auto b_extents = b.extents(); - - - using c_extents_type = std::decay_t; - - using t_engine = tensor_engine< - c_extents_type, - layout_type, - strides, - rebind_storage_size_t - >; - - auto c = t_engine(value_type{}); - - auto& a_static_extents = a_extents.base(); - auto& a_static_strides = a.strides().base(); - - auto& b_static_extents = b_extents.base(); - auto& b_static_strides = b.strides().base(); - - auto c_static_extents = c.extents().base(); - auto c_static_strides = c.strides().base(); - - outer(c.data(), c.rank(), c_static_extents.data(), c_static_strides.data(), - a.data(), a.rank(), a_static_extents.data(), a_static_strides.data(), - b.data(), b.rank(), b_static_extents.data(), b_static_strides.data()); - - return c; - } -} +#ifndef BOOST_UBLAS_TENSOR_FUNCTIONS_HPP +#define BOOST_UBLAS_TENSOR_FUNCTIONS_HPP + +#include "function/reshape.hpp" +#include "function/inner_prod.hpp" +#include "function/outer_prod.hpp" +#include "function/norm.hpp" +#include "function/imag.hpp" +#include "function/real.hpp" +#include "function/conj.hpp" +#include "function/trans.hpp" +#include "function/tensor_times_vector.hpp" +#include "function/tensor_times_matrix.hpp" +#include 
"function/tensor_times_tensor.hpp" +#include "function/init.hpp" #endif diff --git a/include/boost/numeric/ublas/tensor/index.hpp b/include/boost/numeric/ublas/tensor/index.hpp index 7de9e52e7..f13872ceb 100644 --- a/include/boost/numeric/ublas/tensor/index.hpp +++ b/include/boost/numeric/ublas/tensor/index.hpp @@ -1,5 +1,5 @@ // -// Copyright (c) 2018-2019, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at @@ -13,14 +13,11 @@ #define BOOST_UBLAS_TENSOR_INDEX_HPP -#include #include +#include #include -namespace boost { -namespace numeric { -namespace ublas { -namespace index { +namespace boost::numeric::ublas::index { /** @brief Proxy template class for the einstein summation notation * @@ -80,10 +77,7 @@ static constexpr index_type<24> _x; static constexpr index_type<25> _y; static constexpr index_type<26> _z; -} // namespace indices +} // namespace boost::numeric::ublas::index -} -} -} #endif // _BOOST_UBLAS_TENSOR_INDEX_HPP_ diff --git a/include/boost/numeric/ublas/tensor/index_functions.hpp b/include/boost/numeric/ublas/tensor/index_functions.hpp new file mode 100644 index 000000000..bc3d54ac0 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/index_functions.hpp @@ -0,0 +1,64 @@ +// +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + + +#ifndef BOOST_NUMERIC_UBLAS_TENSOR_INDEX_FUNCTIONS_HPP +#define BOOST_NUMERIC_UBLAS_TENSOR_INDEX_FUNCTIONS_HPP + +#include +#include +#include +#include "concepts.hpp" + + +namespace boost::numeric::ublas::detail +{ + +/** @brief Returns relative memory index with respect to a multi-index + * + * @code auto j = to_index({3,4,5}, to_strides({4,2,3},first_order{})); @endcode + * + * @param[in] i multi-index of length p + * @param[in] w stride vector of length p + * @returns relative memory location depending on \c i and \c w + */ +template +[[nodiscard]] inline constexpr auto to_index(std::vector const& w, std::vector const& i) +{ + return std::inner_product(i.begin(), i.end(), w.begin(), T{}); +} + +template +[[nodiscard]] inline constexpr auto to_index(std::array const& w, std::array const& i) +{ + return std::inner_product(i.begin(), i.end(), w.begin(), T{}); +} + +template +[[nodiscard]] inline constexpr auto to_index(std::array const& w, Is ... is) +{ + static_assert(N != sizeof...(is)+2); + auto ai = std::array{I(is)...}; + return std::inner_product(ai.begin(), ai.end(), w.begin(), I{}); +} + +template +[[nodiscard]] inline auto to_index(std::vector const& w, Is ... 
is) +{ + constexpr auto N = sizeof...(is); + auto ai = std::array{I(is)...}; + return std::inner_product(ai.begin(), ai.end(), w.begin(), std::size_t{}); +} + + +} // namespace boost::numeric::ublas::detail + +#endif // BOOST_NUMERIC_UBLAS_TENSOR_INDEX_FUNCTIONS_HPP diff --git a/include/boost/numeric/ublas/tensor/multi_index.hpp b/include/boost/numeric/ublas/tensor/multi_index.hpp index 5b4638944..adb5f8707 100644 --- a/include/boost/numeric/ublas/tensor/multi_index.hpp +++ b/include/boost/numeric/ublas/tensor/multi_index.hpp @@ -1,5 +1,5 @@ // -// Copyright (c) 2018-2019, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at @@ -13,30 +13,16 @@ #define BOOST_UBLAS_TENSOR_MULTI_INDEX_HPP -#include + #include +#include #include -#include "multi_index_utility.hpp" #include "index.hpp" - -namespace boost { -namespace numeric { -namespace ublas { -namespace index { - -template -struct index_type; - -} // namespace indices -} -} -} +#include "multi_index_utility.hpp" -namespace boost { -namespace numeric { -namespace ublas { +namespace boost::numeric::ublas { /** @brief Proxy class for the einstein summation notation * @@ -49,7 +35,8 @@ class multi_index multi_index() = delete; template - constexpr multi_index(index::index_type const& i, indexes ... is ) + constexpr explicit inline + multi_index(index::index_type const& i, indexes ... is ) : _base{i(), is()... } { static_assert( sizeof...(is)+1 == N, @@ -59,30 +46,34 @@ class multi_index "Static assert in boost::numeric::ublas::multi_index: indexes occur twice in multi-index." ); } - multi_index(multi_index const& other) - : _base(other._base) + multi_index(multi_index const& other) = default; + multi_index(multi_index&& other) noexcept = default ; + + multi_index& operator=(multi_index other) { + std::swap(this->_base,other._base); + return *this; } - multi_index& operator=(multi_index const& other) + multi_index& operator=(multi_index&& other) noexcept { - this->_base = other._base; - return *this; + this->_base = std::move(other._base); + return *this; } ~multi_index() = default; - auto const& base() const { return _base; } - constexpr auto size() const { return _base.size(); } - constexpr auto at(std::size_t i) const { return _base.at(i); } - constexpr auto operator[](std::size_t i) const { return _base.at(i); } + [[nodiscard]] inline auto const& base() const { return _base; } + [[nodiscard]] inline constexpr auto size() const { return _base.size(); } + [[nodiscard]] inline constexpr auto at(std::size_t i) const { return _base.at(i); } + [[nodiscard]] inline constexpr auto operator[](std::size_t i) const { return _base.at(i); } private: std::array _base; }; template -constexpr auto get(multi_index const& m) { return std::get(m.base()); } +inline constexpr auto get(multi_index const& m) { return std::get(m.base()); } template auto array_to_vector(multi_index const& lhs, multi_index const& rhs) @@ -91,22 +82,17 @@ auto array_to_vector(multi_index const& lhs, multi_index const& rhs) auto pair_of_vector = std::make_pair( vtype {}, vtype{} ); - for(auto i = 0u; i < N; ++i) - for(auto j = 0u; j < M; ++j) + for(auto i = 0ul; i < N; ++i){ + for(auto j = 0ul; j < M; ++j){ if ( lhs.at(i) == rhs.at(j) && lhs.at(i) != boost::numeric::ublas::index::_()){ pair_of_vector.first .push_back( i+1 ); pair_of_vector.second.push_back( j+1 ); } - + } + } return pair_of_vector; } - - - - -} // namespace ublas -} 
// namespace numeric -} // namespace boost +} // namespace boost::numeric::ublas #endif // MULTI_INDEX_HPP diff --git a/include/boost/numeric/ublas/tensor/multi_index_utility.hpp b/include/boost/numeric/ublas/tensor/multi_index_utility.hpp index f4593e1fd..aa98fa4b0 100644 --- a/include/boost/numeric/ublas/tensor/multi_index_utility.hpp +++ b/include/boost/numeric/ublas/tensor/multi_index_utility.hpp @@ -1,5 +1,5 @@ // -// Copyright (c) 2018-2019, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at @@ -13,15 +13,12 @@ #define BOOST_UBLAS_TENSOR_MULTI_INDEX_UTILITY_HPP +#include #include #include -namespace boost { -namespace numeric { -namespace ublas { -namespace detail { - +namespace boost::numeric::ublas::detail { template struct has_index_impl; @@ -50,9 +47,12 @@ struct has_index_impl > using next_type = has_index_impl>; static constexpr bool value = has_index_impl::value || next_type::value; }; -} // namespace detail +} // namespace boost::numeric::ublas::detail + +namespace boost::numeric::ublas +{ /** @brief has_index is true if index occurs once or more in a multi-index * @@ -69,17 +69,12 @@ struct has_index static constexpr bool value = detail::has_index_impl,std::decay_t>::value; }; -} // namespace ublas -} // namespace numeric -} // namespace boost +} // namespace boost::numeric::ublas //////////////////////////////////////////////// //////////////////////////////////////////////// -namespace boost { -namespace numeric { -namespace ublas { -namespace detail { +namespace boost::numeric::ublas::detail { template @@ -108,7 +103,10 @@ struct valid_multi_index_impl> static constexpr bool has_index_value = has_index_type::value && !is_index_zero; static constexpr bool value = !has_index_value && valid_multi_index_impl::value; }; -} // namespace detail +} // namespace boost::numeric::ublas::detail + +namespace boost::numeric::ublas +{ /** @brief valid_multi_index is true if indexes occur only once in a multi-index * @@ -125,17 +123,13 @@ struct valid_multi_index static constexpr bool value = detail::valid_multi_index_impl>::value; }; -} // namespace ublas -} // namespace numeric -} // namespace boost +} // namespace boost::numeric::ublas //////////////////////////////////////////////// //////////////////////////////////////////////// -namespace boost { -namespace numeric { -namespace ublas { -namespace detail { +namespace boost::numeric::ublas::detail +{ template struct number_equal_indexes_impl; @@ -159,9 +153,11 @@ struct number_equal_indexes_impl < std::tuple, std::tuple< static constexpr unsigned v = has_index_value ? 
1 : 0; static constexpr unsigned value = v + next_type::value; }; -} // namespace detail +} // namespace boost::numeric::ublas::detail +namespace boost::numeric::ublas { + /** @brief number_equal_indexes contains the number of equal indexes of two multi-indexes * * @note a multi-index represents as tuple of single indexes of type boost::numeric::ublas::index::index_type @@ -182,18 +178,14 @@ struct number_equal_indexes detail::number_equal_indexes_impl< std::decay_t, std::decay_t>::value; }; -} // namespace ublas -} // namespace numeric -} // namespace boost +} // namespace boost::numeric::ublas //////////////////////////////////////////////// //////////////////////////////////////////////// -namespace boost { -namespace numeric { -namespace ublas { -namespace detail { +namespace boost::numeric::ublas::detail +{ template @@ -211,10 +203,11 @@ struct index_position_impl < m, m, itype, ttype> static constexpr auto value = std::tuple_size::value; }; -} // namespace detail - +} // namespace boost::numeric::ublas::detail +namespace boost::numeric::ublas +{ /** @brief index_position contains the zero-based index position of an index type within a multi-index * * @note a multi-index represents as tuple of single indexes of type boost::numeric::ublas::index::index_type @@ -235,18 +228,14 @@ struct index_position static constexpr auto value = detail::index_position_impl<0ul,std::tuple_size::value,std::decay_t,std::decay_t>::value; }; -} // namespace ublas -} // namespace numeric -} // namespace boost +} // namespace boost::numeric::ublas //////////////////////////////////////////////// //////////////////////////////////////////////// -namespace boost { -namespace numeric { -namespace ublas { -namespace detail { +namespace boost::numeric::ublas::detail +{ template struct index_position_pairs_impl @@ -295,9 +284,10 @@ struct index_position_pairs_impl }; -} // namespace detail - +} // namespace boost::numeric::ublas::detail +namespace boost::numeric::ublas +{ /** @brief index_position_pairs returns zero-based index positions of matching indexes of two multi-indexes * * @note a multi-index represents as tuple of single indexes of type boost::numeric::ublas::index::index_type @@ -321,9 +311,7 @@ auto index_position_pairs(tuple_left const& lhs, tuple_right const& rhs) return array; } -} // namespace ublas -} // namespace numeric -} // namespace boost +} // namespace boost::numeric::ublas //////////////////////////// //////////////////////////// @@ -331,42 +319,40 @@ auto index_position_pairs(tuple_left const& lhs, tuple_right const& rhs) //////////////////////////// -namespace boost { -namespace numeric { -namespace ublas { -namespace detail { +//namespace boost::numeric::ublas::detail +//{ -template -constexpr auto array_to_vector_impl( array_type const& array, [[maybe_unused]] std::index_sequence sq) -{ - return std::make_pair( - std::vector{std::get<0>( std::get(array) )+1 ...} , - std::vector{std::get<1>( std::get(array) )+1 ...} ); -} +//template +//constexpr auto array_to_vector_impl( array_type const& array, std::index_sequence /*unused*/) +//{ +// return std::make_pair( +// std::vector{std::get(array).first +1 ...} , +// std::vector{std::get(array).second +1 ...} ); +//} -} // namespace detail +//} // namespace boost::numeric::ublas::detail -/** @brief array_to_vector converts a std::array of zero-based index position pairs into two std::vector of one-based index positions - * - * @code auto two_vectors = array_to_vector(std::make_array ( std::make_pair(1,2), std::make_pair(3,4) ) ) ; - * 
@endcode - * - * @returns two std::vector of one-based index positions - * - * @param array std::array of zero-based index position pairs -*/ -template -constexpr auto array_to_vector( std::array const& array) -{ - constexpr auto sequence = std::make_index_sequence{}; - return detail::array_to_vector_impl( array, sequence ); -} +//namespace boost::numeric::ublas +//{ +///** @brief array_to_vector converts a std::array of zero-based index position pairs into two std::vector of one-based index positions +// * +// * @code auto two_vectors = array_to_vector(std::make_array ( std::make_pair(1,2), std::make_pair(3,4) ) ) ; +// * @endcode +// * +// * @returns two std::vector of one-based index positions +// * +// * @param array std::array of zero-based index position pairs +//*/ +//template +//constexpr auto array_to_vector( std::array const& array) +//{ +// constexpr auto sequence = std::make_index_sequence{}; +// return detail::array_to_vector_impl( array, sequence ); +//} -} // namespace ublas -} // namespace numeric -} // namespace boost +//} // namespace boost::numeric::ublas -#endif // _BOOST_UBLAS_TENSOR_MULTI_INDEX_UTILITY_HPP_ +#endif // BOOST_UBLAS_TENSOR_MULTI_INDEX_UTILITY_HPP diff --git a/include/boost/numeric/ublas/tensor/multiplication.hpp b/include/boost/numeric/ublas/tensor/multiplication.hpp index 934dd2d17..6a9c0613b 100644 --- a/include/boost/numeric/ublas/tensor/multiplication.hpp +++ b/include/boost/numeric/ublas/tensor/multiplication.hpp @@ -1,5 +1,5 @@ // -// Copyright (c) 2018-2019, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at @@ -15,11 +15,8 @@ #include -namespace boost { -namespace numeric { -namespace ublas { -namespace detail { -namespace recursive { +namespace boost::numeric::ublas { +namespace detail::recursive { /** @brief Computes the tensor-times-tensor product for q contraction modes @@ -57,34 +54,30 @@ void ttt(SizeType const k, PointerIn1 a, SizeType const*const na, SizeType const*const wa, PointerIn2 b, SizeType const*const nb, SizeType const*const wb) { - if(k < r) - { - // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay) - assert(nc[k] == na[phia[k]-1]); - for(size_t ic = 0u; ic < nc[k]; a += wa[phia[k]-1], c += wc[k], ++ic) - ttt(k+1, r, s, q, phia,phib, c, nc, wc, a, na, wa, b, nb, wb); - } - else if(k < r+s) - { - // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay) - assert(nc[k] == nb[phib[k-r]-1]); - for(size_t ic = 0u; ic < nc[k]; b += wb[phib[k-r]-1], c += wc[k], ++ic) - ttt(k+1, r, s, q, phia, phib, c, nc, wc, a, na, wa, b, nb, wb); - } - else if(k < r+s+q-1) - { - // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay) - assert(na[phia[k-s]-1] == nb[phib[k-r]-1]); - for(size_t ia = 0u; ia < na[phia[k-s]-1]; a += wa[phia[k-s]-1], b += wb[phib[k-r]-1], ++ia) - ttt(k+1, r, s, q, phia, phib, c, nc, wc, a, na, wa, b, nb, wb); - } - else - { - // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay) - assert(na[phia[k-s]-1] == nb[phib[k-r]-1]); - for(size_t ia = 0u; ia < na[phia[k-s]-1]; a += wa[phia[k-s]-1], b += wb[phib[k-r]-1], ++ia) - *c += *a * *b; + if(k < r) { + assert(nc[k] == na[phia[k]-1]); + for(SizeType ic = 0u; ic < nc[k]; a += wa[phia[k]-1], c += wc[k], ++ic) { + ttt(k+1, r, s, q, phia,phib, c, nc, wc, a, na, wa, b, nb, wb); } + } + else if(k < r+s) { + assert(nc[k] == nb[phib[k-r]-1]); + for(SizeType ic = 0u; ic < 
nc[k]; b += wb[phib[k-r]-1], c += wc[k], ++ic) { + ttt(k+1, r, s, q, phia, phib, c, nc, wc, a, na, wa, b, nb, wb); + } + } + else if(k < r+s+q-1) { + assert(na[phia[k-s]-1] == nb[phib[k-r]-1]); + for(SizeType ia = 0u; ia < na[phia[k-s]-1]; a += wa[phia[k-s]-1], b += wb[phib[k-r]-1], ++ia) { + ttt(k+1, r, s, q, phia, phib, c, nc, wc, a, na, wa, b, nb, wb); + } + } + else { + assert(na[phia[k-s]-1] == nb[phib[k-r]-1]); + for(SizeType ia = 0u; ia < na[phia[k-s]-1]; a += wa[phia[k-s]-1], b += wb[phib[k-r]-1], ++ia) { + *c += *a * *b; + } + } } @@ -124,34 +117,30 @@ void ttt(SizeType const k, PointerIn1 a, SizeType const*const na, SizeType const*const wa, PointerIn2 b, SizeType const*const nb, SizeType const*const wb) { - if(k < r) - { - // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay) - assert(nc[k] == na[k]); - for(size_t ic = 0u; ic < nc[k]; a += wa[k], c += wc[k], ++ic) - ttt(k+1, r, s, q, c, nc, wc, a, na, wa, b, nb, wb); - } - else if(k < r+s) - { - // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay) - assert(nc[k] == nb[k-r]); - for(size_t ic = 0u; ic < nc[k]; b += wb[k-r], c += wc[k], ++ic) - ttt(k+1, r, s, q, c, nc, wc, a, na, wa, b, nb, wb); - } - else if(k < r+s+q-1) - { - // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay) - assert(na[k-s] == nb[k-r]); - for(size_t ia = 0u; ia < na[k-s]; a += wa[k-s], b += wb[k-r], ++ia) - ttt(k+1, r, s, q, c, nc, wc, a, na, wa, b, nb, wb); - } - else - { - // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay) - assert(na[k-s] == nb[k-r]); - for(size_t ia = 0u; ia < na[k-s]; a += wa[k-s], b += wb[k-r], ++ia) - *c += *a * *b; + if(k < r) { + assert(nc[k] == na[k]); + for(auto ic = 0ul; ic < nc[k]; a += wa[k], c += wc[k], ++ic) { + ttt(k+1, r, s, q, c, nc, wc, a, na, wa, b, nb, wb); + } + } + else if(k < r+s) { + assert(nc[k] == nb[k-r]); + for(auto ic = 0ul; ic < nc[k]; b += wb[k-r], c += wc[k], ++ic) { + ttt(k+1, r, s, q, c, nc, wc, a, na, wa, b, nb, wb); + } + } + else if(k < r+s+q-1) { + assert(na[k-s] == nb[k-r]); + for(auto ia = 0ul; ia < na[k-s]; a += wa[k-s], b += wb[k-r], ++ia) { + ttt(k+1, r, s, q, c, nc, wc, a, na, wa, b, nb, wb); + } + } + else { + assert(na[k-s] == nb[k-r]); + for(auto ia = 0ul; ia < na[k-s]; a += wa[k-s], b += wb[k-r], ++ia) { + *c += *a * *b; } + } } @@ -181,26 +170,28 @@ void ttm(SizeType const m, SizeType const r, PointerIn2 b, SizeType const*const nb, SizeType const*const wb) { - if(r == m) { - ttm(m, r-1, c, nc, wc, a, na, wa, b, nb, wb); - } - else if(r == 0){ - for(auto i0 = 0ul; i0 < nc[0]; c += wc[0], a += wa[0], ++i0) { - auto cm = c; - auto b0 = b; - for(auto i1 = 0ul; i1 < nc[m]; cm += wc[m], b0 += wb[0], ++i1){ - auto am = a; - auto b1 = b0; - for(auto i2 = 0ul; i2 < nb[1]; am += wa[m], b1 += wb[1], ++i2) - *cm += *am * *b1; - } + if(r == m) { + ttm(m, r-1, c, nc, wc, a, na, wa, b, nb, wb); + } + else if(r == 0){ + for(auto i0 = 0ul; i0 < nc[0]; c += wc[0], a += wa[0], ++i0) { + auto cm = c; + auto b0 = b; + for(auto i1 = 0ul; i1 < nc[m]; cm += wc[m], b0 += wb[0], ++i1){ + auto am = a; + auto b1 = b0; + for(auto i2 = 0ul; i2 < nb[1]; am += wa[m], b1 += wb[1], ++i2){ + *cm += *am * *b1; } + } } + } - else{ - for(auto i = 0ul; i < na[r]; c += wc[r], a += wa[r], ++i) - ttm(m, r-1, c, nc, wc, a, na, wa, b, nb, wb); + else{ + for(auto i = 0ul; i < na[r]; c += wc[r], a += wa[r], ++i){ + ttm(m, r-1, c, nc, wc, a, na, wa, b, nb, wb); } + } } /** @brief Computes the tensor-times-matrix product for the contraction mode m = 0 @@ -223,31 
+214,31 @@ void ttm(SizeType const m, SizeType const r, */ template void ttm0( SizeType const r, - PointerOut c, SizeType const*const nc, SizeType const*const wc, - PointerIn1 a, SizeType const*const na, SizeType const*const wa, - PointerIn2 b, SizeType const*const nb, SizeType const*const wb) + PointerOut c, SizeType const*const nc, SizeType const*const wc, + PointerIn1 a, SizeType const*const na, SizeType const*const wa, + PointerIn2 b, SizeType const*const nb, SizeType const*const wb) { - if(r > 1){ - for(auto i = 0ul; i < na[r]; c += wc[r], a += wa[r], ++i) - ttm0(r-1, c, nc, wc, a, na, wa, b, nb, wb); + if(r > 1){ + for(auto i = 0ul; i < na[r]; c += wc[r], a += wa[r], ++i){ + ttm0(r-1, c, nc, wc, a, na, wa, b, nb, wb); } - else{ - for(auto i1 = 0ul; i1 < nc[1]; c += wc[1], a += wa[1], ++i1) { - auto cm = c; - auto b0 = b; - // r == m == 0 - for(auto i0 = 0ul; i0 < nc[0]; cm += wc[0], b0 += wb[0], ++i0){ - - auto am = a; - auto b1 = b0; - for(auto i2 = 0u; i2 < nb[1]; am += wa[0], b1 += wb[1], ++i2){ - - *cm += *am * *b1; - } - } + } + else{ + for(auto i1 = 0ul; i1 < nc[1]; c += wc[1], a += wa[1], ++i1) { + auto cm = c; + auto b0 = b; + // r == m == 0 + for(auto i0 = 0ul; i0 < nc[0]; cm += wc[0], b0 += wb[0], ++i0){ + + auto am = a; + auto b1 = b0; + for(auto i2 = 0u; i2 < nb[1]; am += wa[0], b1 += wb[1], ++i2){ + *cm += *am * *b1; } + } } + } } @@ -277,25 +268,29 @@ void ttm0( SizeType const r, template void ttv( SizeType const m, SizeType const r, SizeType const q, - PointerOut c, SizeType const*const nc, SizeType const*const wc, - PointerIn1 a, SizeType const*const na, SizeType const*const wa, - PointerIn2 b) + PointerOut c, SizeType const*const nc, SizeType const*const wc, + PointerIn1 a, SizeType const*const na, SizeType const*const wa, + PointerIn2 b) { - if(r == m) { - ttv(m, r-1, q, c, nc, wc, a, na, wa, b); + if(r == m) { + ttv(m, r-1, q, c, nc, wc, a, na, wa, b); + } + else if(r == 0){ + for(auto i0 = 0u; i0 < na[0]; c += wc[0], a += wa[0], ++i0) { + auto c1 = c; + auto a1 = a; + auto b1 = b; + for(auto im = 0u; im < na[m]; a1 += wa[m], ++b1, ++im){ + *c1 += *a1 * *b1; + } } - else if(r == 0){ - for(auto i0 = 0u; i0 < na[0]; c += wc[0], a += wa[0], ++i0) { - auto c1 = c; auto a1 = a; auto b1 = b; - for(auto im = 0u; im < na[m]; a1 += wa[m], ++b1, ++im) - *c1 += *a1 * *b1; - } - } - else{ - for(auto i = 0u; i < na[r]; c += wc[q], a += wa[r], ++i) - ttv(m, r-1, q-1, c, nc, wc, a, na, wa, b); + } + else{ + for(auto i = 0u; i < na[r]; c += wc[q], a += wa[r], ++i){ + ttv(m, r-1, q-1, c, nc, wc, a, na, wa, b); } + } } @@ -322,18 +317,21 @@ void ttv0(SizeType const r, PointerIn2 b) { - if(r > 1){ - for(auto i = 0u; i < na[r]; c += wc[r-1], a += wa[r], ++i) - ttv0(r-1, c, nc, wc, a, na, wa, b); + if(r > 1){ + for(auto i = 0u; i < na[r]; c += wc[r-1], a += wa[r], ++i) { + ttv0(r-1, c, nc, wc, a, na, wa, b); } - else{ - for(auto i1 = 0u; i1 < na[1]; c += wc[0], a += wa[1], ++i1) - { - auto c1 = c; auto a1 = a; auto b1 = b; - for(auto i0 = 0u; i0 < na[0]; a1 += wa[0], ++b1, ++i0) - *c1 += *a1 * *b1; - } + } + else{ + for(auto i1 = 0u; i1 < na[1]; c += wc[0], a += wa[1], ++i1) { + auto c1 = c; + auto a1 = a; + auto b1 = b; + for(auto i0 = 0u; i0 < na[0]; a1 += wa[0], ++b1, ++i0){ + *c1 += *a1 * *b1; + } } + } } @@ -354,18 +352,21 @@ void ttv0(SizeType const r, */ template void mtv(SizeType const m, - PointerOut c, [[maybe_unused]] SizeType const*const nc, SizeType const*const wc, - PointerIn1 a, SizeType const*const na, SizeType const*const wa, + PointerOut c, SizeType const*const 
/*unsed*/, SizeType const*const wc, + PointerIn1 a, SizeType const*const na , SizeType const*const wa, PointerIn2 b) { - // decides whether matrix multiplied with vector or vector multiplied with matrix - const auto o = (m == 0) ? 1 : 0; - - for(auto io = 0u; io < na[o]; c += wc[o], a += wa[o], ++io) { - auto c1 = c; auto a1 = a; auto b1 = b; - for(auto im = 0u; im < na[m]; a1 += wa[m], ++b1, ++im) - *c1 += *a1 * *b1; + // decides whether matrix multiplied with vector or vector multiplied with matrix + const auto o = (m == 0) ? 1 : 0; + + for(auto io = 0u; io < na[o]; c += wc[o], a += wa[o], ++io) { + auto c1 = c; + auto a1 = a; + auto b1 = b; + for(auto im = 0u; im < na[m]; a1 += wa[m], ++b1, ++im) { + *c1 += *a1 * *b1; } + } } @@ -391,28 +392,23 @@ void mtm(PointerOut c, SizeType const*const nc, SizeType const*const wc, PointerIn2 b, SizeType const*const nb, SizeType const*const wb) { - // C(i,j) = A(i,k) * B(k,j) - // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay) - assert(nc[0] == na[0]); - // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay) - assert(nc[1] == nb[1]); - // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay) - assert(na[1] == nb[0]); - - auto cj = c; auto bj = b; - for(auto j = 0u; j < nc[1]; cj += wc[1], bj += wb[1], ++j) { - - auto bk = bj; auto ak = a; - for(auto k = 0u; k < na[1]; ak += wa[1], bk += wb[0], ++k) { - - auto ci = cj; auto ai = ak; - for(auto i = 0u; i < na[0]; ai += wa[0], ci += wc[0], ++i){ - *ci += *ai * *bk; - } - - } - + // C(i,j) = A(i,k) * B(k,j) + assert(nc[0] == na[0]); + assert(nc[1] == nb[1]); + assert(na[1] == nb[0]); + + auto cj = c; auto bj = b; + for(auto j = 0u; j < nc[1]; cj += wc[1], bj += wb[1], ++j) { + auto bk = bj; + auto ak = a; + for(auto k = 0u; k < na[1]; ak += wa[1], bk += wb[0], ++k) { + auto ci = cj; + auto ai = ak; + for(auto i = 0u; i < na[0]; ai += wa[0], ci += wc[0], ++i){ + *ci += *ai * *bk; + } } + } } @@ -438,13 +434,17 @@ value_t inner(SizeType const r, SizeType const*const n, PointerIn2 b, SizeType const*const wb, value_t v) { - if(r == 0) - for(auto i0 = 0u; i0 < n[0]; a += wa[0], b += wb[0], ++i0) - v += *a * *b; - else - for(auto ir = 0u; ir < n[r]; a += wa[r], b += wb[r], ++ir) - v = inner(r-1, n, a, wa, b, wb, v); - return v; + if(r == 0){ + for(auto i0 = 0u; i0 < n[0]; a += wa[0], b += wb[0], ++i0){ + v += *a * *b; + } + } + else{ + for(auto ir = 0u; ir < n[r]; a += wa[r], b += wb[r], ++ir){ + v = inner(r-1, n, a, wa, b, wb, v); + } + } + return v; } @@ -454,25 +454,26 @@ void outer_2x2(SizeType const pa, PointerIn1 a, SizeType const*const na, SizeType const*const wa, PointerIn2 b, SizeType const*const nb, SizeType const*const wb) { - // assert(rc == 3); - // assert(ra == 1); - // assert(rb == 1); - - for(auto ib1 = 0u; ib1 < nb[1]; b += wb[1], c += wc[pa+1], ++ib1) { - auto c2 = c; - auto b0 = b; - for(auto ib0 = 0u; ib0 < nb[0]; b0 += wb[0], c2 += wc[pa], ++ib0) { - const auto new_b = *b0; - auto c1 = c2; - auto a1 = a; - for(auto ia1 = 0u; ia1 < na[1]; a1 += wa[1], c1 += wc[1], ++ia1) { - auto a0 = a1; - auto c0 = c1; - for(SizeType ia0 = 0u; ia0 < na[0]; a0 += wa[0], c0 += wc[0], ++ia0) - *c0 = *a0 * new_b; - } + // assert(rc == 3); + // assert(ra == 1); + // assert(rb == 1); + + for(auto ib1 = 0u; ib1 < nb[1]; b += wb[1], c += wc[pa+1], ++ib1) { + auto c2 = c; + auto b0 = b; + for(auto ib0 = 0u; ib0 < nb[0]; b0 += wb[0], c2 += wc[pa], ++ib0) { + const auto new_b = *b0; + auto c1 = c2; + auto a1 = a; + for(auto ia1 = 0u; ia1 < na[1]; a1 += wa[1], c1 += 
wc[1], ++ia1) { + auto a0 = a1; + auto c0 = c1; + for(SizeType ia0 = 0u; ia0 < na[0]; a0 += wa[0], c0 += wc[0], ++ia0){ + *c0 = *a0 * new_b; } + } } + } } /** @brief Computes the outer product of two tensors @@ -505,14 +506,19 @@ void outer(SizeType const pa, SizeType const ra, PointerIn1 a, SizeType const*const na, SizeType const*const wa, SizeType const rb, PointerIn2 b, SizeType const*const nb, SizeType const*const wb) { - if(rb > 1) - for(auto ib = 0u; ib < nb[rb]; b += wb[rb], c += wc[rc], ++ib) - outer(pa, rc-1, c, nc, wc, ra, a, na, wa, rb-1, b, nb, wb); - else if(ra > 1) - for(auto ia = 0u; ia < na[ra]; a += wa[ra], c += wc[ra], ++ia) - outer(pa, rc-1, c, nc, wc, ra-1, a, na, wa, rb, b, nb, wb); - else - outer_2x2(pa, c, nc, wc, a, na, wa, b, nb, wb); //assert(ra==1 && rb==1 && rc==3); + if(rb > 1){ + for(auto ib = 0u; ib < nb[rb]; b += wb[rb], c += wc[rc], ++ib){ + outer(pa, rc-1, c, nc, wc, ra, a, na, wa, rb-1, b, nb, wb); + } + } + else if(ra > 1){ + for(auto ia = 0u; ia < na[ra]; a += wa[ra], c += wc[ra], ++ia){ + outer(pa, rc-1, c, nc, wc, ra-1, a, na, wa, rb, b, nb, wb); + } + } + else{ + outer_2x2(pa, c, nc, wc, a, na, wa, b, nb, wb); //assert(ra==1 && rb==1 && rc==3); + } } @@ -551,38 +557,29 @@ void outer(SizeType const k, PointerIn1 a, SizeType const*const na, SizeType const*const wa, PointerIn2 b, SizeType const*const nb, SizeType const*const wb) { - if(k < r) - { - // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay) - assert(nc[k] == na[phia[k]-1]); - for(size_t ic = 0u; ic < nc[k]; a += wa[phia[k]-1], c += wc[k], ++ic) - outer(k+1, r, s, phia,phib, c, nc, wc, a, na, wa, b, nb, wb); - } - else if(k < r+s-1) - { - - // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay) - assert(nc[k] == nb[phib[k-r]-1]); - for(size_t ic = 0u; ic < nc[k]; b += wb[phib[k-r]-1], c += wc[k], ++ic) - outer(k+1, r, s, phia, phib, c, nc, wc, a, na, wa, b, nb, wb); - } - else - { - // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay) - assert(nc[k] == nb[phib[k-r]-1]); - for(size_t ic = 0u; ic < nc[k]; b += wb[phib[k-r]-1], c += wc[k], ++ic) - *c = *a * *b; + if(k < r) { + assert(nc[k] == na[phia[k]-1]); + for(auto ic = 0ul; ic < nc[k]; a += wa[phia[k]-1], c += wc[k], ++ic){ + outer(k+1, r, s, phia,phib, c, nc, wc, a, na, wa, b, nb, wb); + } + } + else if(k < r+s-1) { + assert(nc[k] == nb[phib[k-r]-1]); + for(auto ic = 0ul; ic < nc[k]; b += wb[phib[k-r]-1], c += wc[k], ++ic){ + outer(k+1, r, s, phia, phib, c, nc, wc, a, na, wa, b, nb, wb); + } + } + else { + assert(nc[k] == nb[phib[k-r]-1]); + for(auto ic = 0u; ic < nc[k]; b += wb[phib[k-r]-1], c += wc[k], ++ic){ + *c = *a * *b; } + } } -} // namespace recursive -} // namespace detail -} // namespace ublas -} // namespace numeric -} // namespace boost - - +} // namespace detail::recursive +} // namespace boost::numeric::ublas ////////////////////////////////////////////////////////////////////////////////////////// @@ -598,9 +595,7 @@ void outer(SizeType const k, #include -namespace boost { -namespace numeric { -namespace ublas { +namespace boost::numeric::ublas { /** @brief Computes the tensor-times-vector product * @@ -628,44 +623,53 @@ void ttv(SizeType const m, SizeType const p, const PointerIn1 a, SizeType const*const na, SizeType const*const wa, const PointerIn2 b, SizeType const*const nb, SizeType const*const wb) { - static_assert( std::is_pointer::value & std::is_pointer::value & std::is_pointer::value, - "Static error in boost::numeric::ublas::ttv: Argument types for pointers are not 
pointer types."); - - if( m == 0) - throw std::length_error("Error in boost::numeric::ublas::ttv: Contraction mode must be greater than zero."); - - if( p < m ) - throw std::length_error("Error in boost::numeric::ublas::ttv: Rank must be greater equal the modus."); - - if( p == 0) - throw std::length_error("Error in boost::numeric::ublas::ttv: Rank must be greater than zero."); - - if(c == nullptr || a == nullptr || b == nullptr) - throw std::length_error("Error in boost::numeric::ublas::ttv: Pointers shall not be null pointers."); - - for(auto i = 0u; i < m-1; ++i) - if(na[i] != nc[i]) - throw std::length_error("Error in boost::numeric::ublas::ttv: Extents (except of dimension mode) of A and C must be equal."); - - for(auto i = m; i < p; ++i) - if(na[i] != nc[i-1]) - throw std::length_error("Error in boost::numeric::ublas::ttv: Extents (except of dimension mode) of A and C must be equal."); - - const auto max = std::max(nb[0], nb[1]); - if( na[m-1] != max) - throw std::length_error("Error in boost::numeric::ublas::ttv: Extent of dimension mode of A and b must be equal."); - + static_assert( std::is_pointer::value && std::is_pointer::value & std::is_pointer::value, + "Static error in boost::numeric::ublas::ttv: Argument types for pointers are not pointer types."); + + if( m == 0){ + throw std::length_error("Error in boost::numeric::ublas::ttv: Contraction mode must be greater than zero."); + } + + if( p < m ){ + throw std::length_error("Error in boost::numeric::ublas::ttv: Rank must be greater equal the modus."); + } + if( p == 0){ + throw std::length_error("Error in boost::numeric::ublas::ttv: Rank must be greater than zero."); + } + if(c == nullptr || a == nullptr || b == nullptr){ + throw std::length_error("Error in boost::numeric::ublas::ttv: Pointers shall not be null pointers."); + } + for(auto i = 0u; i < m-1; ++i){ + if(na[i] != nc[i]){ + throw std::length_error("Error in boost::numeric::ublas::ttv: Extents (except of dimension mode) of A and C must be equal."); + } + } - if((m != 1) && (p > 2)) - detail::recursive::ttv(m-1, p-1, p-2, c, nc, wc, a, na, wa, b); - else if ((m == 1) && (p > 2)) - detail::recursive::ttv0(p-1, c, nc, wc, a, na, wa, b); - else if( p == 2 ) - detail::recursive::mtv(m-1, c, nc, wc, a, na, wa, b); - else /*if( p == 1 )*/{ - auto v = std::remove_pointer_t>{}; - *c = detail::recursive::inner(SizeType(0), na, a, wa, b, wb, v); + for(auto i = m; i < p; ++i){ + if(na[i] != nc[i-1]){ + throw std::length_error("Error in boost::numeric::ublas::ttv: Extents (except of dimension mode) of A and C must be equal."); } + } + + const auto max = std::max(nb[0], nb[1]); + if( na[m-1] != max){ + throw std::length_error("Error in boost::numeric::ublas::ttv: Extent of dimension mode of A and b must be equal."); + } + + + if((m != 1) && (p > 2)){ + detail::recursive::ttv(m-1, p-1, p-2, c, nc, wc, a, na, wa, b); + } + else if ((m == 1) && (p > 2)){ + detail::recursive::ttv0(p-1, c, nc, wc, a, na, wa, b); + } + else if( p == 2 ){ + detail::recursive::mtv(m-1, c, nc, wc, a, na, wa, b); + } + else /*if( p == 1 )*/{ + auto v = std::remove_pointer_t>{}; + *c = detail::recursive::inner(SizeType(0), na, a, wa, b, wb, v); + } } @@ -699,40 +703,47 @@ void ttm(SizeType const m, SizeType const p, const PointerIn2 b, SizeType const*const nb, SizeType const*const wb) { - static_assert( std::is_pointer::value & std::is_pointer::value & std::is_pointer::value, - "Static error in boost::numeric::ublas::ttm: Argument types for pointers are not pointer types."); - - if( m == 0 ) - throw 
std::length_error("Error in boost::numeric::ublas::ttm: Contraction mode must be greater than zero."); - - if( p < m ) - throw std::length_error("Error in boost::numeric::ublas::ttm: Rank must be greater equal than the specified mode."); - - if( p == 0) - throw std::length_error("Error in boost::numeric::ublas::ttm:Rank must be greater than zero."); - - if(c == nullptr || a == nullptr || b == nullptr) - throw std::length_error("Error in boost::numeric::ublas::ttm: Pointers shall not be null pointers."); - - for(auto i = 0u; i < m-1; ++i) - if(na[i] != nc[i]) - throw std::length_error("Error in boost::numeric::ublas::ttm: Extents (except of dimension mode) of A and C must be equal."); - - for(auto i = m; i < p; ++i) - if(na[i] != nc[i]) - throw std::length_error("Error in boost::numeric::ublas::ttm: Extents (except of dimension mode) of A and C must be equal."); - - if(na[m-1] != nb[1]) - throw std::length_error("Error in boost::numeric::ublas::ttm: 2nd Extent of B and M-th Extent of A must be the equal."); - - if(nc[m-1] != nb[0]) - throw std::length_error("Error in boost::numeric::ublas::ttm: 1nd Extent of B and M-th Extent of C must be the equal."); - - if ( m != 1 ) - detail::recursive::ttm (m-1, p-1, c, nc, wc, a, na, wa, b, nb, wb); - else /*if (m == 1 && p > 2)*/ - detail::recursive::ttm0( p-1, c, nc, wc, a, na, wa, b, nb, wb); - + static_assert( + std::is_pointer::value && + std::is_pointer::value && + std::is_pointer::value); + + if( m == 0 ){ + throw std::length_error("Error in boost::numeric::ublas::ttm: Contraction mode must be greater than zero."); + } + if( p < m ){ + throw std::length_error("Error in boost::numeric::ublas::ttm: Rank must be greater than or equal to the specified mode."); + } + if( p == 0 ){ + throw std::length_error("Error in boost::numeric::ublas::ttm: Rank must be greater than zero."); + } + if(c == nullptr || a == nullptr || b == nullptr){ + throw std::length_error("Error in boost::numeric::ublas::ttm: Pointers shall not be null pointers."); + } + for(auto i = 0u; i < m-1; ++i){ + if(na[i] != nc[i]){ + throw std::length_error("Error in boost::numeric::ublas::ttm: Extents (except of dimension mode) of A and C must be equal."); + } + } + for(auto i = m; i < p; ++i){ + if(na[i] != nc[i]){ + throw std::length_error("Error in boost::numeric::ublas::ttm: Extents (except of dimension mode) of A and C must be equal."); + } + } + if(na[m-1] != nb[1]){ + throw std::length_error("Error in boost::numeric::ublas::ttm: 2nd Extent of B and M-th Extent of A must be equal."); + } + if(nc[m-1] != nb[0]){ + throw std::length_error("Error in boost::numeric::ublas::ttm: 1st Extent of B and M-th Extent of C must be equal."); + } + + + if ( m != 1 ){ + detail::recursive::ttm (m-1, p-1, c, nc, wc, a, na, wa, b, nb, wb); + } + else{ /*if (m == 1 && p > 2)*/ + detail::recursive::ttm0( p-1, c, nc, wc, a, na, wa, b, nb, wb); + } } @@ -769,39 +780,44 @@ void ttt(SizeType const pa, SizeType const pb, SizeType const q, PointerIn1 a, SizeType const*const na, SizeType const*const wa, PointerIn2 b, SizeType const*const nb, SizeType const*const wb) { - static_assert( std::is_pointer::value & std::is_pointer::value & std::is_pointer::value, - "Static error in boost::numeric::ublas::ttm: Argument types for pointers are not pointer types."); - - if( pa == 0 || pb == 0) - throw std::length_error("Error in boost::numeric::ublas::ttt: tensor order must be greater zero."); - - if( q > pa && q > pb) - throw std::length_error("Error in boost::numeric::ublas::ttt: number of contraction must be 
smaller than or equal to the tensor order."); - - - SizeType const r = pa - q; - SizeType const s = pb - q; - - if(c == nullptr || a == nullptr || b == nullptr) - throw std::length_error("Error in boost::numeric::ublas::ttm: Pointers shall not be null pointers."); - - for(auto i = 0ul; i < r; ++i) - if( na[phia[i]-1] != nc[i] ) - throw std::length_error("Error in boost::numeric::ublas::ttt: dimensions of lhs and res tensor not correct."); - - for(auto i = 0ul; i < s; ++i) - if( nb[phib[i]-1] != nc[r+i] ) - throw std::length_error("Error in boost::numeric::ublas::ttt: dimensions of rhs and res not correct."); - - for(auto i = 0ul; i < q; ++i) - if( nb[phib[s+i]-1] != na[phia[r+i]-1] ) - throw std::length_error("Error in boost::numeric::ublas::ttt: dimensions of lhs and rhs not correct."); - - - if(q == 0ul) - detail::recursive::outer(SizeType{0},r,s, phia,phib, c,nc,wc, a,na,wa, b,nb,wb); - else - detail::recursive::ttt(SizeType{0},r,s,q, phia,phib, c,nc,wc, a,na,wa, b,nb,wb); + static_assert( std::is_pointer::value && std::is_pointer::value && std::is_pointer::value, + "Static error in boost::numeric::ublas::ttm: Argument types for pointers are not pointer types."); + + if( pa == 0 || pb == 0){ + throw std::length_error("Error in boost::numeric::ublas::ttt: tensor order must be greater zero."); + } + + if( q > pa && q > pb) { + throw std::length_error("Error in boost::numeric::ublas::ttt: number of contraction must be smaller than or equal to the tensor order."); + } + + SizeType const r = pa - q; + SizeType const s = pb - q; + + if(c == nullptr || a == nullptr || b == nullptr){ + throw std::length_error("Error in boost::numeric::ublas::ttm: Pointers shall not be null pointers."); + } + for(auto i = 0ul; i < r; ++i){ + if( na[phia[i]-1] != nc[i] ){ + throw std::length_error("Error in boost::numeric::ublas::ttt: dimensions of lhs and res tensor not correct."); + } + } + for(auto i = 0ul; i < s; ++i){ + if( nb[phib[i]-1] != nc[r+i] ){ + throw std::length_error("Error in boost::numeric::ublas::ttt: dimensions of rhs and res not correct."); + } + } + for(auto i = 0ul; i < q; ++i){ + if( nb[phib[s+i]-1] != na[phia[r+i]-1] ){ + throw std::length_error("Error in boost::numeric::ublas::ttt: dimensions of lhs and rhs not correct."); + } + } + if(q == 0ul){ + detail::recursive::outer(SizeType{0},r,s, phia,phib, c,nc,wc, a,na,wa, b,nb,wb); + } + else{ + detail::recursive::ttt(SizeType{0},r,s,q, phia,phib, c,nc,wc, a,na,wa, b,nb,wb); + } } @@ -836,45 +852,51 @@ void ttt(SizeType const pa, SizeType const pb, SizeType const q, PointerIn1 a, SizeType const*const na, SizeType const*const wa, PointerIn2 b, SizeType const*const nb, SizeType const*const wb) { - static_assert( std::is_pointer::value & std::is_pointer::value & std::is_pointer::value, - "Static error in boost::numeric::ublas::ttm: Argument types for pointers are not pointer types."); - - if( pa == 0 || pb == 0) - throw std::length_error("Error in boost::numeric::ublas::ttt: tensor order must be greater zero."); - - if( q > pa && q > pb) - throw std::length_error("Error in boost::numeric::ublas::ttt: number of contraction must be smaller than or equal to the tensor order."); - - - SizeType const r = pa - q; - SizeType const s = pb - q; - SizeType const pc = r+s; - - if(c == nullptr || a == nullptr || b == nullptr) - throw std::length_error("Error in boost::numeric::ublas::ttm: Pointers shall not be null pointers."); - - for(auto i = 0ul; i < r; ++i) - if( na[i] != nc[i] ) - throw std::length_error("Error in boost::numeric::ublas::ttt: dimensions 
of lhs and res tensor not correct."); - - for(auto i = 0ul; i < s; ++i) - if( nb[i] != nc[r+i] ) - throw std::length_error("Error in boost::numeric::ublas::ttt: dimensions of rhs and res not correct."); - - for(auto i = 0ul; i < q; ++i) - if( nb[s+i] != na[r+i] ) - throw std::length_error("Error in boost::numeric::ublas::ttt: dimensions of lhs and rhs not correct."); - - using value_type = std::decay_t; + static_assert( std::is_pointer::value && std::is_pointer::value && std::is_pointer::value, + "Static error in boost::numeric::ublas::ttm: Argument types for pointers are not pointer types."); + + if( pa == 0 || pb == 0){ + throw std::length_error("Error in boost::numeric::ublas::ttt: tensor order must be greater zero."); + } + if( q > pa && q > pb){ + throw std::length_error("Error in boost::numeric::ublas::ttt: number of contraction must be smaller than or equal to the tensor order."); + } + + SizeType const r = pa - q; + SizeType const s = pb - q; + SizeType const pc = r+s; + + if(c == nullptr || a == nullptr || b == nullptr){ + throw std::length_error("Error in boost::numeric::ublas::ttm: Pointers shall not be null pointers."); + } + for(auto i = 0ul; i < r; ++i){ + if( na[i] != nc[i] ){ + throw std::length_error("Error in boost::numeric::ublas::ttt: dimensions of lhs and res tensor not correct."); + } + } + for(auto i = 0ul; i < s; ++i){ + if( nb[i] != nc[r+i] ){ + throw std::length_error("Error in boost::numeric::ublas::ttt: dimensions of rhs and res not correct."); + } + } + for(auto i = 0ul; i < q; ++i){ + if( nb[s+i] != na[r+i] ){ + throw std::length_error("Error in boost::numeric::ublas::ttt: dimensions of lhs and rhs not correct."); + } + } + using value_type = std::decay_t; - if(q == 0ul) - detail::recursive::outer(pa, pc-1, c,nc,wc, pa-1, a,na,wa, pb-1, b,nb,wb); - else if(r == 0ul && s == 0ul) - *c = detail::recursive::inner(q-1, na, a,wa, b,wb, value_type(0) ); - else - detail::recursive::ttt(SizeType{0},r,s,q, c,nc,wc, a,na,wa, b,nb,wb); + if(q == 0ul){ + detail::recursive::outer(pa, pc-1, c,nc,wc, pa-1, a,na,wa, pb-1, b,nb,wb); + } + else if(r == 0ul && s == 0ul){ + *c = detail::recursive::inner(q-1, na, a,wa, b,wb, value_type(0) ); + } + else{ + detail::recursive::ttt(SizeType{0},r,s,q, c,nc,wc, a,na,wa, b,nb,wb); + } } @@ -900,14 +922,16 @@ auto inner(const SizeType p, SizeType const*const n, const PointerIn2 b, SizeType const*const wb, value_t v) { - static_assert( std::is_pointer::value && std::is_pointer::value, - "Static error in boost::numeric::ublas::inner: Argument types for pointers must be pointer types."); - if(p<2) - throw std::length_error("Error in boost::numeric::ublas::inner: Rank must be greater than zero."); - if(a == nullptr || b == nullptr) - throw std::length_error("Error in boost::numeric::ublas::inner: Pointers shall not be null pointers."); + static_assert( std::is_pointer::value && std::is_pointer::value, + "Static error in boost::numeric::ublas::inner: Argument types for pointers must be pointer types."); + if(p<2){ + throw std::length_error("Error in boost::numeric::ublas::inner: Rank must be greater than zero."); + } + if(a == nullptr || b == nullptr){ + throw std::length_error("Error in boost::numeric::ublas::inner: Pointers shall not be null pointers."); + } - return detail::recursive::inner(p-1, n, a, wa, b, wb, v); + return detail::recursive::inner(p-1, n, a, wa, b, wb, v); } @@ -936,24 +960,25 @@ void outer(PointerOut c, SizeType const pc, SizeType const*const nc, SizeT const PointerIn1 a, SizeType const pa, SizeType const*const na, 
SizeType const*const wa, const PointerIn2 b, SizeType const pb, SizeType const*const nb, SizeType const*const wb) { - static_assert( std::is_pointer::value & std::is_pointer::value & std::is_pointer::value, - "Static error in boost::numeric::ublas::outer: argument types for pointers must be pointer types."); - if(pa < 2u || pb < 2u) - throw std::length_error("Error in boost::numeric::ublas::outer: number of extents of lhs and rhs tensor must be equal or greater than two."); - if((pa + pb) != pc) - throw std::length_error("Error in boost::numeric::ublas::outer: number of extents of lhs plus rhs tensor must be equal to the number of extents of C."); - if(a == nullptr || b == nullptr || c == nullptr) - throw std::length_error("Error in boost::numeric::ublas::outer: pointers shall not be null pointers."); - - detail::recursive::outer(pa, pc-1, c, nc, wc, pa-1, a, na, wa, pb-1, b, nb, wb); + static_assert( std::is_pointer::value && std::is_pointer::value && std::is_pointer::value, + "Static error in boost::numeric::ublas::outer: argument types for pointers must be pointer types."); + if(pa < 2u || pb < 2u){ + throw std::length_error("Error in boost::numeric::ublas::outer: number of extents of lhs and rhs tensor must be equal or greater than two."); + } + if((pa + pb) != pc){ + throw std::length_error("Error in boost::numeric::ublas::outer: number of extents of lhs plus rhs tensor must be equal to the number of extents of C."); + } + if(a == nullptr || b == nullptr || c == nullptr){ + throw std::length_error("Error in boost::numeric::ublas::outer: pointers shall not be null pointers."); + } + + detail::recursive::outer(pa, pc-1, c, nc, wc, pa-1, a, na, wa, pb-1, b, nb, wb); } -} -} -} +} // namespace boost::numeric::ublas #endif diff --git a/include/boost/numeric/ublas/tensor/operators_arithmetic.hpp b/include/boost/numeric/ublas/tensor/operators_arithmetic.hpp index c6679a3d7..fa89d431f 100644 --- a/include/boost/numeric/ublas/tensor/operators_arithmetic.hpp +++ b/include/boost/numeric/ublas/tensor/operators_arithmetic.hpp @@ -1,5 +1,5 @@ // -// Copyright (c) 2018-2019, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at @@ -16,16 +16,14 @@ #include "expression_evaluation.hpp" #include "multi_index_utility.hpp" #include "functions.hpp" -#include +#include "type_traits.hpp" #include #include #include -namespace boost{ -namespace numeric{ -namespace ublas { - +namespace boost::numeric::ublas +{ template class tensor_core; @@ -37,15 +35,14 @@ class matrix_expression; template class vector_expression; -} -} -} +} // namespace boost::numeric::ublas + template inline -constexpr auto operator*( - boost ::numeric ::ublas ::detail ::tensor_expression const& lhs, - boost ::numeric ::ublas ::vector_expression const& rhs) noexcept + constexpr auto operator*( + boost ::numeric ::ublas ::detail ::tensor_expression const& lhs, + boost ::numeric ::ublas ::vector_expression const& rhs) noexcept { return boost ::numeric ::ublas ::detail ::make_binary_tensor_expression( lhs(), rhs(), std::multiplies<>{}); @@ -53,9 +50,9 @@ constexpr auto operator*( template inline -constexpr auto operator+( - boost ::numeric ::ublas ::detail ::tensor_expression const& lhs, - boost ::numeric ::ublas ::vector_expression const& rhs) noexcept + constexpr auto operator+( + boost ::numeric ::ublas ::detail ::tensor_expression const& lhs, + boost ::numeric ::ublas ::vector_expression const& rhs) noexcept { return boost ::numeric ::ublas ::detail ::make_binary_tensor_expression( lhs(), rhs(), std::plus<>{}); @@ -63,9 +60,9 @@ constexpr auto operator+( template inline -constexpr auto operator-( - boost ::numeric ::ublas ::detail ::tensor_expression const& lhs, - boost ::numeric ::ublas ::vector_expression const& rhs) noexcept + constexpr auto operator-( + boost ::numeric ::ublas ::detail ::tensor_expression const& lhs, + boost ::numeric ::ublas ::vector_expression const& rhs) noexcept { return boost ::numeric ::ublas ::detail ::make_binary_tensor_expression( lhs(), rhs(), std::minus<>{}); @@ -73,9 +70,9 @@ constexpr auto operator-( template inline -constexpr auto operator/( - boost ::numeric ::ublas ::detail ::tensor_expression const& lhs, - boost ::numeric ::ublas ::vector_expression const& rhs) noexcept + constexpr auto operator/( + boost ::numeric ::ublas ::detail ::tensor_expression const& lhs, + boost ::numeric ::ublas ::vector_expression const& rhs) noexcept { return boost ::numeric ::ublas ::detail ::make_binary_tensor_expression( lhs(), rhs(), std::divides<>{}); @@ -84,9 +81,9 @@ constexpr auto operator/( template inline -constexpr auto operator*( - boost ::numeric ::ublas ::detail ::tensor_expression const& lhs, - boost ::numeric ::ublas ::matrix_expression const& rhs) noexcept + constexpr auto operator*( + boost ::numeric ::ublas ::detail ::tensor_expression const& lhs, + boost ::numeric ::ublas ::matrix_expression const& rhs) noexcept { return boost ::numeric ::ublas ::detail ::make_binary_tensor_expression( lhs(), rhs(), std::multiplies<>{}); @@ -94,9 +91,9 @@ constexpr auto operator*( template inline -constexpr auto operator+( - boost ::numeric ::ublas ::detail ::tensor_expression const& lhs, - boost ::numeric ::ublas ::matrix_expression const& rhs) noexcept + constexpr auto operator+( + boost ::numeric ::ublas ::detail ::tensor_expression const& lhs, + boost ::numeric ::ublas ::matrix_expression const& rhs) noexcept { return boost ::numeric ::ublas ::detail ::make_binary_tensor_expression( lhs(), rhs(), std::plus<>{}); @@ -104,9 +101,9 @@ constexpr auto operator+( template inline -constexpr auto operator-( - boost ::numeric ::ublas ::detail ::tensor_expression const& lhs, - boost 
::numeric ::ublas ::matrix_expression const& rhs) noexcept + constexpr auto operator-( + boost ::numeric ::ublas ::detail ::tensor_expression const& lhs, + boost ::numeric ::ublas ::matrix_expression const& rhs) noexcept { return boost ::numeric ::ublas ::detail ::make_binary_tensor_expression( lhs(), rhs(), std::minus<>{}); @@ -114,9 +111,9 @@ constexpr auto operator-( template inline -constexpr auto operator/( - boost ::numeric ::ublas ::detail ::tensor_expression const& lhs, - boost ::numeric ::ublas ::matrix_expression const& rhs) noexcept + constexpr auto operator/( + boost ::numeric ::ublas ::detail ::tensor_expression const& lhs, + boost ::numeric ::ublas ::matrix_expression const& rhs) noexcept { return boost ::numeric ::ublas ::detail ::make_binary_tensor_expression( lhs(), rhs(), std::divides<>{}); @@ -125,9 +122,9 @@ constexpr auto operator/( template inline -constexpr auto operator*( - boost ::numeric ::ublas ::vector_expression const& lhs, - boost ::numeric ::ublas ::detail ::tensor_expression const& rhs) noexcept + constexpr auto operator*( + boost ::numeric ::ublas ::vector_expression const& lhs, + boost ::numeric ::ublas ::detail ::tensor_expression const& rhs) noexcept { return boost ::numeric ::ublas ::detail ::make_binary_tensor_expression( lhs(), rhs(), std::multiplies<>{}); @@ -135,9 +132,9 @@ constexpr auto operator*( template inline -constexpr auto operator+( - boost ::numeric ::ublas ::vector_expression const& lhs, - boost ::numeric ::ublas ::detail ::tensor_expression const& rhs) noexcept + constexpr auto operator+( + boost ::numeric ::ublas ::vector_expression const& lhs, + boost ::numeric ::ublas ::detail ::tensor_expression const& rhs) noexcept { return boost ::numeric ::ublas ::detail ::make_binary_tensor_expression( lhs(), rhs(), std::plus<>{}); @@ -145,9 +142,9 @@ constexpr auto operator+( template inline -constexpr auto operator-( - boost ::numeric ::ublas ::vector_expression const& lhs, - boost ::numeric ::ublas ::detail ::tensor_expression const& rhs) noexcept + constexpr auto operator-( + boost ::numeric ::ublas ::vector_expression const& lhs, + boost ::numeric ::ublas ::detail ::tensor_expression const& rhs) noexcept { return boost ::numeric ::ublas ::detail ::make_binary_tensor_expression( lhs(), rhs(), std::minus<>{}); @@ -155,9 +152,9 @@ constexpr auto operator-( template inline -constexpr auto operator/( - boost ::numeric ::ublas ::vector_expression const& lhs, - boost ::numeric ::ublas ::detail ::tensor_expression const& rhs) noexcept + constexpr auto operator/( + boost ::numeric ::ublas ::vector_expression const& lhs, + boost ::numeric ::ublas ::detail ::tensor_expression const& rhs) noexcept { return boost ::numeric ::ublas ::detail ::make_binary_tensor_expression( lhs(), rhs(), std::divides<>{}); @@ -166,9 +163,9 @@ constexpr auto operator/( template inline -constexpr auto operator*( - boost ::numeric ::ublas ::matrix_expression const& lhs, - boost ::numeric ::ublas ::detail ::tensor_expression const& rhs) noexcept + constexpr auto operator*( + boost ::numeric ::ublas ::matrix_expression const& lhs, + boost ::numeric ::ublas ::detail ::tensor_expression const& rhs) noexcept { return boost ::numeric ::ublas ::detail ::make_binary_tensor_expression( lhs(), rhs(), std::multiplies<>{}); @@ -176,9 +173,9 @@ constexpr auto operator*( template inline -constexpr auto operator+( - boost ::numeric ::ublas ::matrix_expression const& lhs, - boost ::numeric ::ublas ::detail ::tensor_expression const& rhs) noexcept + constexpr auto operator+( + boost ::numeric 
::ublas ::matrix_expression const& lhs, + boost ::numeric ::ublas ::detail ::tensor_expression const& rhs) noexcept { return boost ::numeric ::ublas ::detail ::make_binary_tensor_expression( lhs(), rhs(), std::plus<>{}); @@ -186,9 +183,9 @@ constexpr auto operator+( template inline -constexpr auto operator-( - boost ::numeric ::ublas ::matrix_expression const& lhs, - boost ::numeric ::ublas ::detail ::tensor_expression const& rhs) noexcept + constexpr auto operator-( + boost ::numeric ::ublas ::matrix_expression const& lhs, + boost ::numeric ::ublas ::detail ::tensor_expression const& rhs) noexcept { return boost ::numeric ::ublas ::detail ::make_binary_tensor_expression( lhs(), rhs(), std::minus<>{}); @@ -196,9 +193,9 @@ constexpr auto operator-( template inline -constexpr auto operator/( - boost ::numeric ::ublas ::matrix_expression const& lhs, - boost ::numeric ::ublas ::detail ::tensor_expression const& rhs) noexcept + constexpr auto operator/( + boost ::numeric ::ublas ::matrix_expression const& lhs, + boost ::numeric ::ublas ::detail ::tensor_expression const& rhs) noexcept { return boost ::numeric ::ublas ::detail ::make_binary_tensor_expression( lhs(), rhs(), std::divides<>{}); @@ -207,191 +204,191 @@ constexpr auto operator/( template inline -constexpr auto operator+( boost::numeric::ublas::detail::tensor_expression const& lhs, + constexpr auto operator+( boost::numeric::ublas::detail::tensor_expression const& lhs, boost::numeric::ublas::detail::tensor_expression const& rhs) { - - static_assert( std::is_same_v< typename T1::value_type, typename T2::value_type>, - "operator+() : LHS tensor and RHS tensor should have the same value type" - ); - - if constexpr( !std::is_same_v ){ - auto const& e = boost::numeric::ublas::detail::retrieve_extents(rhs); - - if( !boost::numeric::ublas::detail::all_extents_equal(lhs,e) ){ - throw std::runtime_error("operator+() : LHS tensor and RHS tensor should have equal extents"); - } + + static_assert( std::is_same_v< typename T1::value_type, typename T2::value_type>, + "operator+() : LHS tensor and RHS tensor should have the same value type" + ); + + if constexpr( !std::is_same_v ){ + auto const& e = boost::numeric::ublas::detail::retrieve_extents(rhs); + + if( !boost::numeric::ublas::detail::all_extents_equal(lhs,e) ){ + throw std::runtime_error("operator+() : LHS tensor and RHS tensor should have equal extents"); } + } - return boost::numeric::ublas::detail::make_binary_tensor_expression (lhs(), rhs(), [](auto const& l, auto const& r){ return l + r; }); + return boost::numeric::ublas::detail::make_binary_tensor_expression (lhs(), rhs(), [](auto const& l, auto const& r){ return l + r; }); } template inline -constexpr auto operator-( boost::numeric::ublas::detail::tensor_expression const& lhs, + constexpr auto operator-( boost::numeric::ublas::detail::tensor_expression const& lhs, boost::numeric::ublas::detail::tensor_expression const& rhs) { - - static_assert( std::is_same_v< typename T1::value_type, typename T2::value_type>, - "operator-() : LHS tensor and RHS tensor should have the same value type" - ); - if constexpr( !std::is_same_v ){ - auto e = boost::numeric::ublas::detail::retrieve_extents(rhs); + static_assert( std::is_same_v< typename T1::value_type, typename T2::value_type>, + "operator-() : LHS tensor and RHS tensor should have the same value type" + ); + + if constexpr( !std::is_same_v ){ + auto e = boost::numeric::ublas::detail::retrieve_extents(rhs); - if( !boost::numeric::ublas::detail::all_extents_equal(lhs,e) ){ - throw 
std::runtime_error("operator+() : LHS tensor and RHS tensor should have equal extents"); - } + if( !boost::numeric::ublas::detail::all_extents_equal(lhs,e) ){ + throw std::runtime_error("operator+() : LHS tensor and RHS tensor should have equal extents"); } + } - return boost::numeric::ublas::detail::make_binary_tensor_expression (lhs(), rhs(), [](auto const& l, auto const& r){ return l - r; }); -// return boost::numeric::ublas::detail::make_lambda([&lhs,&rhs](std::size_t i){ return lhs(i) - rhs(i);}); + return boost::numeric::ublas::detail::make_binary_tensor_expression (lhs(), rhs(), [](auto const& l, auto const& r){ return l - r; }); + // return boost::numeric::ublas::detail::make_lambda([&lhs,&rhs](std::size_t i){ return lhs(i) - rhs(i);}); } template inline -constexpr auto operator*( boost::numeric::ublas::detail::tensor_expression const& lhs, + constexpr auto operator*( boost::numeric::ublas::detail::tensor_expression const& lhs, boost::numeric::ublas::detail::tensor_expression const& rhs) { - - static_assert( std::is_same_v< typename T1::value_type, typename T2::value_type>, - "operator*() : LHS tensor and RHS tensor should have the same value type" - ); - if constexpr( !std::is_same_v ){ - auto const& e = boost::numeric::ublas::detail::retrieve_extents(rhs); + static_assert( std::is_same_v< typename T1::value_type, typename T2::value_type>, + "operator*() : LHS tensor and RHS tensor should have the same value type" + ); - if( !boost::numeric::ublas::detail::all_extents_equal(lhs,e) ){ - throw std::runtime_error("operator+() : LHS tensor and RHS tensor should have equal extents"); - } + if constexpr( !std::is_same_v ){ + auto const& e = boost::numeric::ublas::detail::retrieve_extents(rhs); + + if( !boost::numeric::ublas::detail::all_extents_equal(lhs,e) ){ + throw std::runtime_error("operator+() : LHS tensor and RHS tensor should have equal extents"); } + } - return boost::numeric::ublas::detail::make_binary_tensor_expression (lhs(), rhs(), [](auto const& l, auto const& r){ return l * r; }); + return boost::numeric::ublas::detail::make_binary_tensor_expression (lhs(), rhs(), [](auto const& l, auto const& r){ return l * r; }); } template inline -constexpr auto operator/( boost::numeric::ublas::detail::tensor_expression const& lhs, + constexpr auto operator/( boost::numeric::ublas::detail::tensor_expression const& lhs, boost::numeric::ublas::detail::tensor_expression const& rhs) { - - static_assert( std::is_same_v< typename T1::value_type, typename T2::value_type>, - "operator/() : LHS tensor and RHS tensor should have the same value type" - ); - if constexpr( !std::is_same_v ){ - auto e = boost::numeric::ublas::detail::retrieve_extents(rhs); + static_assert( std::is_same_v< typename T1::value_type, typename T2::value_type>, + "operator/() : LHS tensor and RHS tensor should have the same value type" + ); + + if constexpr( !std::is_same_v ){ + auto e = boost::numeric::ublas::detail::retrieve_extents(rhs); - if( !boost::numeric::ublas::detail::all_extents_equal(lhs,e) ){ - throw std::runtime_error("operator+() : LHS tensor and RHS tensor should have equal extents"); - } + if( !boost::numeric::ublas::detail::all_extents_equal(lhs,e) ){ + throw std::runtime_error("operator+() : LHS tensor and RHS tensor should have equal extents"); } + } - return boost::numeric::ublas::detail::make_binary_tensor_expression (lhs(), rhs(), std::divides<>{}); + return boost::numeric::ublas::detail::make_binary_tensor_expression (lhs(), rhs(), std::divides<>{}); } // Overloaded Arithmetic Operators with 
Scalars template inline -constexpr auto operator+(typename boost::numeric::ublas::tensor_core::const_reference lhs, + constexpr auto operator+(typename boost::numeric::ublas::tensor_core::const_reference lhs, boost::numeric::ublas::detail::tensor_expression,R> const& rhs) noexcept { - using tensor_core_type = boost::numeric::ublas::tensor_core; - return boost::numeric::ublas::detail::make_unary_tensor_expression (rhs(), [lhs](auto const& r){ return lhs + r; }); + using tensor_core_type = boost::numeric::ublas::tensor_core; + return boost::numeric::ublas::detail::make_unary_tensor_expression (rhs(), [lhs](auto const& r){ return lhs + r; }); } template inline -constexpr auto operator-(typename boost::numeric::ublas::tensor_core::const_reference lhs, + constexpr auto operator-(typename boost::numeric::ublas::tensor_core::const_reference lhs, boost::numeric::ublas::detail::tensor_expression,R> const& rhs) noexcept { - using tensor_core_type = boost::numeric::ublas::tensor_core; - return boost::numeric::ublas::detail::make_unary_tensor_expression (rhs(), [lhs](auto const& r){ return lhs - r; }); + using tensor_core_type = boost::numeric::ublas::tensor_core; + return boost::numeric::ublas::detail::make_unary_tensor_expression (rhs(), [lhs](auto const& r){ return lhs - r; }); } template inline -constexpr auto operator*(typename boost::numeric::ublas::tensor_core::const_reference lhs, + constexpr auto operator*(typename boost::numeric::ublas::tensor_core::const_reference lhs, boost::numeric::ublas::detail::tensor_expression,R> const& rhs) noexcept { - using tensor_core_type = boost::numeric::ublas::tensor_core; - return boost::numeric::ublas::detail::make_unary_tensor_expression (rhs(), [lhs](auto const& r){ return lhs * r; }); + using tensor_core_type = boost::numeric::ublas::tensor_core; + return boost::numeric::ublas::detail::make_unary_tensor_expression (rhs(), [lhs](auto const& r){ return lhs * r; }); } template inline -constexpr auto operator/(typename boost::numeric::ublas::tensor_core::const_reference lhs, + constexpr auto operator/(typename boost::numeric::ublas::tensor_core::const_reference lhs, boost::numeric::ublas::detail::tensor_expression,R> const& rhs) noexcept { - using tensor_core_type = boost::numeric::ublas::tensor_core; - return boost::numeric::ublas::detail::make_unary_tensor_expression (rhs(), [lhs](auto const& r){ return lhs / r; }); + using tensor_core_type = boost::numeric::ublas::tensor_core; + return boost::numeric::ublas::detail::make_unary_tensor_expression (rhs(), [lhs](auto const& r){ return lhs / r; }); } template inline -constexpr auto operator+(boost::numeric::ublas::detail::tensor_expression,L> const& lhs, + constexpr auto operator+(boost::numeric::ublas::detail::tensor_expression,L> const& lhs, typename boost::numeric::ublas::tensor_core::const_reference rhs) noexcept { - using tensor_core_type = boost::numeric::ublas::tensor_core; - return boost::numeric::ublas::detail::make_unary_tensor_expression (lhs(), [rhs] (auto const& l) { return l + rhs; } ); + using tensor_core_type = boost::numeric::ublas::tensor_core; + return boost::numeric::ublas::detail::make_unary_tensor_expression (lhs(), [rhs] (auto const& l) { return l + rhs; } ); } template inline -constexpr auto operator-(boost::numeric::ublas::detail::tensor_expression,L> const& lhs, + constexpr auto operator-(boost::numeric::ublas::detail::tensor_expression,L> const& lhs, typename boost::numeric::ublas::tensor_core::const_reference rhs) noexcept { - using tensor_core_type = 
boost::numeric::ublas::tensor_core; - return boost::numeric::ublas::detail::make_unary_tensor_expression (lhs(), [rhs] (auto const& l) { return l - rhs; } ); + using tensor_core_type = boost::numeric::ublas::tensor_core; + return boost::numeric::ublas::detail::make_unary_tensor_expression (lhs(), [rhs] (auto const& l) { return l - rhs; } ); } template inline -constexpr auto operator*(boost::numeric::ublas::detail::tensor_expression,L> const& lhs, + constexpr auto operator*(boost::numeric::ublas::detail::tensor_expression,L> const& lhs, typename boost::numeric::ublas::tensor_core::const_reference rhs) noexcept { - using tensor_core_type = boost::numeric::ublas::tensor_core; - return boost::numeric::ublas::detail::make_unary_tensor_expression (lhs(), [rhs] (auto const& l) { return l * rhs; } ); + using tensor_core_type = boost::numeric::ublas::tensor_core; + return boost::numeric::ublas::detail::make_unary_tensor_expression (lhs(), [rhs] (auto const& l) { return l * rhs; } ); } template inline -constexpr auto operator/(boost::numeric::ublas::detail::tensor_expression,L> const& lhs, + constexpr auto operator/(boost::numeric::ublas::detail::tensor_expression,L> const& lhs, typename boost::numeric::ublas::tensor_core::const_reference rhs) noexcept { - using tensor_core_type = boost::numeric::ublas::tensor_core; - return boost::numeric::ublas::detail::make_unary_tensor_expression (lhs(), [rhs] (auto const& l) { return l / rhs; } ); + using tensor_core_type = boost::numeric::ublas::tensor_core; + return boost::numeric::ublas::detail::make_unary_tensor_expression (lhs(), [rhs] (auto const& l) { return l / rhs; } ); } template inline -constexpr auto& operator += (boost::numeric::ublas::tensor_core& lhs, + constexpr auto& operator += (boost::numeric::ublas::tensor_core& lhs, const boost::numeric::ublas::detail::tensor_expression,D> &expr) { - boost::numeric::ublas::detail::eval(lhs, expr(), [](auto& l, auto const& r) { l+=r; } ); - return lhs; + boost::numeric::ublas::detail::eval(lhs, expr(), [](auto& l, auto const& r) { l+=r; } ); + return lhs; } template inline -constexpr auto& operator -= (boost::numeric::ublas::tensor_core& lhs, + constexpr auto& operator -= (boost::numeric::ublas::tensor_core& lhs, const boost::numeric::ublas::detail::tensor_expression,D> &expr) { - boost::numeric::ublas::detail::eval(lhs, expr(), [](auto& l, auto const& r) { l-=r; } ); - return lhs; + boost::numeric::ublas::detail::eval(lhs, expr(), [](auto& l, auto const& r) { l-=r; } ); + return lhs; } template inline -constexpr auto& operator *= (boost::numeric::ublas::tensor_core& lhs, + constexpr auto& operator *= (boost::numeric::ublas::tensor_core& lhs, const boost::numeric::ublas::detail::tensor_expression,D> &expr) { - boost::numeric::ublas::detail::eval(lhs, expr(), [](auto& l, auto const& r) { l*=r; } ); - return lhs; + boost::numeric::ublas::detail::eval(lhs, expr(), [](auto& l, auto const& r) { l*=r; } ); + return lhs; } template inline -constexpr auto& operator /= (boost::numeric::ublas::tensor_core& lhs, + constexpr auto& operator /= (boost::numeric::ublas::tensor_core& lhs, const boost::numeric::ublas::detail::tensor_expression,D> &expr) { - boost::numeric::ublas::detail::eval(lhs, expr(), [](auto& l, auto const& r) { l/=r; } ); - return lhs; + boost::numeric::ublas::detail::eval(lhs, expr(), [](auto& l, auto const& r) { l/=r; } ); + return lhs; } @@ -399,36 +396,36 @@ constexpr auto& operator /= (boost::numeric::ublas::tensor_core& lhs, template inline -constexpr auto& operator += 
(boost::numeric::ublas::tensor_core& lhs, + constexpr auto& operator += (boost::numeric::ublas::tensor_core& lhs, typename boost::numeric::ublas::tensor_core::const_reference r) { - boost::numeric::ublas::detail::eval(lhs, [r](auto& l) { l+=r; } ); - return lhs; + boost::numeric::ublas::detail::eval(lhs, [r](auto& l) { l+=r; } ); + return lhs; } template inline -constexpr auto& operator -= (boost::numeric::ublas::tensor_core& lhs, + constexpr auto& operator -= (boost::numeric::ublas::tensor_core& lhs, typename boost::numeric::ublas::tensor_core::const_reference r) { - boost::numeric::ublas::detail::eval(lhs, [r](auto& l) { l-=r; } ); - return lhs; + boost::numeric::ublas::detail::eval(lhs, [r](auto& l) { l-=r; } ); + return lhs; } template inline -constexpr auto& operator *= (boost::numeric::ublas::tensor_core& lhs, + constexpr auto& operator *= (boost::numeric::ublas::tensor_core& lhs, typename boost::numeric::ublas::tensor_core::const_reference r) { - boost::numeric::ublas::detail::eval(lhs, [r](auto& l) { l*=r; } ); - return lhs; + boost::numeric::ublas::detail::eval(lhs, [r](auto& l) { l*=r; } ); + return lhs; } template constexpr auto& operator /= (boost::numeric::ublas::tensor_core& lhs, typename boost::numeric::ublas::tensor_core::const_reference r) { - boost::numeric::ublas::detail::eval(lhs, [r](auto& l) { l/=r; } ); + boost::numeric::ublas::detail::eval(lhs, [r](auto& l) { l/=r; } ); return lhs; } @@ -438,15 +435,15 @@ constexpr auto& operator /= (boost::numeric::ublas::tensor_core& l template -inline -constexpr auto const& operator +(const boost::numeric::ublas::detail::tensor_expression& lhs) noexcept{ - return lhs; +inline constexpr + auto const& operator +(const boost::numeric::ublas::detail::tensor_expression& lhs) noexcept{ + return lhs; } template -inline -constexpr auto operator -(boost::numeric::ublas::detail::tensor_expression const& lhs) { - return boost::numeric::ublas::detail::make_unary_tensor_expression (lhs(), std::negate<>{} ); +inline constexpr + auto operator -(boost::numeric::ublas::detail::tensor_expression const& lhs) { + return boost::numeric::ublas::detail::make_unary_tensor_expression (lhs(), std::negate<>{} ); } @@ -459,33 +456,53 @@ constexpr auto operator -(boost::numeric::ublas::detail::tensor_expression template auto operator*( - std::pair< tensor_type_left const&, tuple_type_left > lhs, - std::pair< tensor_type_right const&, tuple_type_right > rhs) + std::pair< tensor_type_left const&, tuple_type_left > lhs, + std::pair< tensor_type_right const&, tuple_type_right > rhs) { - using namespace boost::numeric::ublas; + namespace ublas = boost::numeric::ublas; - auto const& tensor_left = lhs.first; - auto const& tensor_right = rhs.first; + auto const& tensor_left = lhs.first; + auto const& tensor_right = rhs.first; - auto multi_index_left = lhs.second; - auto multi_index_right = rhs.second; + auto multi_index_left = lhs.second; + auto multi_index_right = rhs.second; - static constexpr auto num_equal_ind = number_equal_indexes::value; + static constexpr auto num_equal_ind = ublas::number_equal_indexes::value; - if constexpr ( num_equal_ind == 0 ){ - return tensor_left * tensor_right; - } - else if constexpr ( num_equal_ind==std::tuple_size::value && std::is_same::value ){ + if constexpr ( num_equal_ind == 0 ){ + return tensor_left * tensor_right; + } + else if constexpr ( num_equal_ind==std::tuple_size::value && std::is_same::value ){ - return boost::numeric::ublas::inner_prod( tensor_left, tensor_right ); - } - else { - auto array_index_pairs = 
index_position_pairs(multi_index_left,multi_index_right); - auto index_pairs = array_to_vector( array_index_pairs ); - return boost::numeric::ublas::prod( tensor_left, tensor_right, index_pairs.first, index_pairs.second ); + return ublas::inner_prod( tensor_left, tensor_right ); + } + else { + auto index_pairs = ublas::index_position_pairs(multi_index_left,multi_index_right); + constexpr auto size = std::tuple_size_v; + + using extents_left_type = typename tensor_type_left ::extents_type; + using extents_right_type = typename tensor_type_right::extents_type; + + constexpr bool has_dynamic_extents = ublas::is_dynamic_rank_v || ublas::is_dynamic_rank_v; + + using index_tuple = std::conditional_t, std::array>; + + auto phi_left = index_tuple{}; + auto phi_right = index_tuple{}; + + if constexpr(has_dynamic_extents) { + phi_left .resize(size); + phi_right.resize(size); } + std::transform(index_pairs.begin(), index_pairs.end(), phi_left .begin(), [](auto a){ return a.first +1ul; } ); + std::transform(index_pairs.begin(), index_pairs.end(), phi_right.begin(), [](auto b){ return b.second +1ul; } ); + +// auto index_pairs = ublas::array_to_vector( array_index_pairs ); + return ublas::prod( tensor_left, tensor_right, phi_left, phi_right ); + } + } #endif diff --git a/include/boost/numeric/ublas/tensor/operators_comparison.hpp b/include/boost/numeric/ublas/tensor/operators_comparison.hpp index 7516c1731..efc6c7323 100644 --- a/include/boost/numeric/ublas/tensor/operators_comparison.hpp +++ b/include/boost/numeric/ublas/tensor/operators_comparison.hpp @@ -1,5 +1,5 @@ // -// Copyright (c) 2018-2019, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at @@ -12,20 +12,23 @@ #ifndef BOOST_UBLAS_TENSOR_OPERATORS_COMPARISON_HPP #define BOOST_UBLAS_TENSOR_OPERATORS_COMPARISON_HPP -#include -#include -#include #include #include #include #include +#include "extents.hpp" +#include "expression.hpp" +#include "type_traits.hpp" +#include "expression_evaluation.hpp" + namespace boost::numeric::ublas { template class tensor_core; -} +} // namespace boost::numeric::ublas -namespace boost::numeric::ublas::detail { +namespace boost::numeric::ublas::detail +{ template [[nodiscard]] inline @@ -36,7 +39,7 @@ constexpr bool compare(tensor_core const& lhs, tensor_core const& rhs, B "LHS and RHS both should have the same value type" ); - if(lhs.extents() != rhs.extents()){ + if(::operator!=(lhs.extents(),rhs.extents())){ if constexpr(!std::is_same>::value && !std::is_same>::value) throw std::runtime_error( "boost::numeric::ublas::detail::compare(tensor_core const&, tensor_core const&, BinaryPred) : " @@ -95,43 +98,49 @@ constexpr bool compare(tensor_expression const& expr, UnaryPred pred) return compare(T( expr ), pred); } -} +} // namespace boost::numeric::ublas::detail template -[[nodiscard]] inline -constexpr bool operator==( boost::numeric::ublas::detail::tensor_expression const& lhs, - boost::numeric::ublas::detail::tensor_expression const& rhs) { +[[nodiscard]] inline +constexpr bool operator==( + boost::numeric::ublas::detail::tensor_expression const& lhs, + boost::numeric::ublas::detail::tensor_expression const& rhs) { return boost::numeric::ublas::detail::compare( lhs, rhs, std::equal_to<>{} ); } template [[nodiscard]] inline -constexpr auto operator!=(boost::numeric::ublas::detail::tensor_expression const& lhs, - boost::numeric::ublas::detail::tensor_expression 
const& rhs) { +constexpr auto operator!=( + boost::numeric::ublas::detail::tensor_expression const& lhs, + boost::numeric::ublas::detail::tensor_expression const& rhs) { return boost::numeric::ublas::detail::compare( lhs, rhs, std::not_equal_to<>{} ); } template [[nodiscard]] inline -constexpr auto operator< ( boost::numeric::ublas::detail::tensor_expression const& lhs, - boost::numeric::ublas::detail::tensor_expression const& rhs) { +constexpr auto operator< ( + boost::numeric::ublas::detail::tensor_expression const& lhs, + boost::numeric::ublas::detail::tensor_expression const& rhs) { return boost::numeric::ublas::detail::compare( lhs, rhs, std::less<>{} ); } template [[nodiscard]] inline -constexpr auto operator<=( boost::numeric::ublas::detail::tensor_expression const& lhs, - boost::numeric::ublas::detail::tensor_expression const& rhs) { +constexpr auto operator<=( + boost::numeric::ublas::detail::tensor_expression const& lhs, + boost::numeric::ublas::detail::tensor_expression const& rhs) { return boost::numeric::ublas::detail::compare( lhs, rhs, std::less_equal<>{} ); } template [[nodiscard]] inline -constexpr auto operator> ( boost::numeric::ublas::detail::tensor_expression const& lhs, - boost::numeric::ublas::detail::tensor_expression const& rhs) { +constexpr auto operator> ( + boost::numeric::ublas::detail::tensor_expression const& lhs, + boost::numeric::ublas::detail::tensor_expression const& rhs) { return boost::numeric::ublas::detail::compare( lhs, rhs, std::greater<>{} ); } template [[nodiscard]] inline -constexpr auto operator>=( boost::numeric::ublas::detail::tensor_expression const& lhs, - boost::numeric::ublas::detail::tensor_expression const& rhs) { +constexpr auto operator>=( + boost::numeric::ublas::detail::tensor_expression const& lhs, + boost::numeric::ublas::detail::tensor_expression const& rhs) { return boost::numeric::ublas::detail::compare( lhs, rhs, std::greater_equal<>{} ); } diff --git a/include/boost/numeric/ublas/tensor/ostream.hpp b/include/boost/numeric/ublas/tensor/ostream.hpp index 1940546a3..2ce7940cc 100644 --- a/include/boost/numeric/ublas/tensor/ostream.hpp +++ b/include/boost/numeric/ublas/tensor/ostream.hpp @@ -1,5 +1,5 @@ // -// Copyright (c) 2018-2019, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at @@ -12,25 +12,28 @@ #ifndef BOOST_UBLAS_TENSOR_OSTREAM_HPP #define BOOST_UBLAS_TENSOR_OSTREAM_HPP -#include + +#include "extents/extents_functions.hpp" + + #include -#include +#include + -namespace boost { -namespace numeric { -namespace ublas { -namespace detail { + +namespace boost::numeric::ublas::detail +{ template void print(std::ostream& out, value_type const& p) { - out << p << " "; + out << p << " "; } template void print(std::ostream& out, const std::complex& p) { - out << std::real(p) << "+" << std::imag(p) << "i "; + out << std::real(p) << "+" << std::imag(p) << "i "; } @@ -38,95 +41,80 @@ template void print(std::ostream& out, size_type r, const value_type* p, const size_type* w, const size_type* n) { - if(r < 2) + if(r < 2) + { + out << "[ ... " << std::endl; + + for(auto row = 0u; row < n[0]; p += w[0], ++row) // iterate over one column { - out << "[ ... 
" << std::endl; - - for(auto row = 0u; row < n[0]; p += w[0], ++row) // iterate over one column - { - auto const* p1 = p; - for(auto col = 0u; col < n[1]; p1 += w[1], ++col) // iterate over first row - { - print(out,*p1); - } - if(row < n[0]-1) - out << "; " << std::endl; - } - out << "]"; + auto const* p1 = p; + for(auto col = 0u; col < n[1]; p1 += w[1], ++col) // iterate over first row + { + print(out,*p1); + } + if(row < n[0]-1){ + out << "; " << std::endl; + } } - else - { - out << "cat("<< r+1 <<",..." << std::endl; - for(auto d = 0u; d < n[r]-1; p += w[r], ++d){ - print(out, r-1, p, w, n); - out << ",..." << std::endl; - } - print(out, r-1, p, w, n); + out << "]"; + } + else + { + out << "cat("<< r+1 <<",..." << std::endl; + for(auto d = 0u; d < n[r]-1; p += w[r], ++d){ + print(out, r-1, p, w, n); + out << ",..." << std::endl; } - if(r>1) - out << ")"; + print(out, r-1, p, w, n); + } + if(r>1){ + out << ")"; + } } //////////////////////////// -} -} -} -} +} // namespace boost::numeric::ublas::detail -namespace boost { -namespace numeric { -namespace ublas { +namespace boost::numeric::ublas +{ template class tensor_core; -template -class matrix; - -template -class vector; - -} -} -} +} //namespace boost::numeric::ublas template -std::ostream& operator << (std::ostream& out, boost::numeric::ublas::tensor_core const& t) +std::ostream& operator << (std::ostream& out, class boost::numeric::ublas::tensor_core const& t) { - if(is_scalar(t.extents())){ - out << '['; - boost::numeric::ublas::detail::print(out,t[0]); - out << ']'; - } - else if(is_vector(t.extents())) { - const auto& cat = t.extents().at(0) > t.extents().at(1) ? ';' : ','; - out << '['; - for(auto i = 0u; i < t.size()-1; ++i){ - boost::numeric::ublas::detail::print(out,t[i]); - out << cat << ' '; - } - boost::numeric::ublas::detail::print(out,t[t.size()-1]); - out << ']'; + namespace ublas = boost::numeric::ublas; + + auto const& n = t.extents(); + auto const& w = t.strides(); + + if(is_scalar(n)){ + out << '['; + ublas::detail::print(out,t[0]); + out << ']'; + } + else if(is_vector(n)) { + const auto& cat = n.at(0) > n.at(1) ? ';' : ','; + out << '['; + for(auto i = 0u; i < t.size()-1; ++i){ + ublas::detail::print(out,t[i]); + out << cat << ' '; } - else{ - boost::numeric::ublas::detail::print(out, t.rank()-1, t.data(), t.strides().data(), t.extents().data()); - } - return out; -} - -template - || boost::numeric::ublas::is_extents_v - , int> = 0 -> -std::ostream& operator<<(std::ostream& os, T const& e){ - return os< -#include -#include - -namespace boost::numeric::ublas { - -template class basic_static_extents; - -/** @brief Template class for storing tensor extents for compile time. 
- * - * @code basic_static_extents<1,2,3,4> t @endcode - * @tparam E parameter pack of extents - * - */ -template -class basic_static_extents{ - -public: - - static constexpr auto _size = sizeof...(E); - - using base_type = std::array; - using value_type = typename base_type::value_type; - using size_type = typename base_type::size_type; - using reference = typename base_type::reference; - using const_reference = typename base_type::const_reference; - using const_pointer = typename base_type::const_pointer; - using const_iterator = typename base_type::const_iterator; - using const_reverse_iterator = typename base_type::const_reverse_iterator; - - static_assert( std::numeric_limits::is_integer, "Static error in basic_static_extents: type must be of type integer."); - static_assert(!std::numeric_limits::is_signed, "Static error in basic_static_extents: type must be of type unsigned integer."); - - //@returns the rank of basic_static_extents - [[nodiscard]] inline - constexpr size_type size() const noexcept { return _size; } - - /** - * @param k pos of extent - * @returns the element at given pos - */ - [[nodiscard]] inline - static constexpr const_reference at(size_type k){ - return m_data.at(k); - } - - [[nodiscard]] inline - constexpr const_reference operator[](size_type k) const{ - return m_data[k]; - } - - constexpr basic_static_extents() = default; - - constexpr basic_static_extents(basic_static_extents const&) noexcept = default; - constexpr basic_static_extents(basic_static_extents &&) noexcept = default; - - constexpr basic_static_extents& operator=(basic_static_extents const&) noexcept = default; - constexpr basic_static_extents& operator=(basic_static_extents &&) noexcept = default; - - ~basic_static_extents() = default; - - /** @brief Returns ref to the std::array containing extents */ - [[nodiscard]] inline - constexpr base_type const& base() const noexcept{ - return m_data; - } - - /** @brief Returns pointer to the std::array containing extents */ - [[nodiscard]] inline - constexpr const_pointer data() const noexcept{ - return m_data.data(); - } - - /** @brief Checks if extents is empty or not - * - * @returns true if rank is 0 else false - * - */ - [[nodiscard]] inline - constexpr bool empty() const noexcept { return m_data.empty(); } - - [[nodiscard]] inline - constexpr const_reference back() const{ - return m_data.back(); - } - - [[nodiscard]] inline - constexpr const_iterator begin() const noexcept{ - return m_data.begin(); - } - - [[nodiscard]] inline - constexpr const_iterator end() const noexcept{ - return m_data.end(); - } - - [[nodiscard]] inline - constexpr const_reverse_iterator - rbegin() const noexcept - { - return m_data.rbegin(); - } - - [[nodiscard]] inline - constexpr const_reverse_iterator - rend() const noexcept - { - return m_data.rend(); - } - - /// msvc 14.27 does not consider 'at' function constexpr. - /// To make msvc happy get function is declared - /// and it will be removed when we start using boost.mp11 - template - static constexpr auto get() noexcept{ - static_assert(I < _size, - "boost::numeric::ublas::basic_static_extents::get() : " - "out of bound access" - ); - using element_at = std::tuple_element_t; - return element_at{}; - } - -private: - static constexpr base_type const m_data{E...}; - /// will be removed when we start using boost.mp11 - using tuple_type = std::tuple< std::integral_constant... 
>; -}; - - -template -using static_extents = basic_static_extents; - -} // namespace boost::numeric::ublas -#endif diff --git a/include/boost/numeric/ublas/tensor/static_strides.hpp b/include/boost/numeric/ublas/tensor/static_strides.hpp deleted file mode 100644 index f43101b8c..000000000 --- a/include/boost/numeric/ublas/tensor/static_strides.hpp +++ /dev/null @@ -1,267 +0,0 @@ -// -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// -/// \file strides.hpp Definition for the basic_strides template class - -#ifndef BOOST_UBLAS_TENSOR_STATIC_STRIDES_HPP -#define BOOST_UBLAS_TENSOR_STATIC_STRIDES_HPP - -#include -#include -#include - -namespace boost::numeric::ublas{ - - template class basic_static_strides; - -} // boost::numeric::ublas - -namespace boost::numeric::ublas::detail{ - - namespace impl{ - - // concat two static_stride_list togather - // @code using type = typename concat< static_stride_list, static_stride_list >::type @endcode - template - struct concat; - - template - struct concat< basic_static_extents, basic_static_extents > { - using type = basic_static_extents; - }; - - template - using concat_t = typename concat::type; - - // generates static_stride_list containing ones with specific size - template - struct make_sequence_of_ones; - - template - using make_sequence_of_ones_t = typename make_sequence_of_ones::type; - - template - struct make_sequence_of_ones { - using type = concat_t, make_sequence_of_ones_t>; - }; - - template - struct make_sequence_of_ones { - using type = basic_static_extents; - }; - template - struct make_sequence_of_ones{ - using type = basic_static_extents; - }; - - template - struct extents_to_array; - - template - inline static constexpr auto extents_to_array_v = extents_to_array::value; - - template - struct extents_to_array< basic_static_extents > - { - static constexpr std::array const value = {Es...}; - }; - - } // impl - - - template - using make_sequence_of_ones_t = impl::make_sequence_of_ones_t; - - template - constexpr auto make_static_strides_first_order( [[maybe_unused]] E const& e, [[maybe_unused]] basic_static_extents const& res ){ - if constexpr( I >= E::_size - 1ul ){ - return impl::extents_to_array_v< basic_static_extents >; - }else{ - using res_type = basic_static_extents; - - constexpr auto prod = E::template get().value * res_type::template get().value; - using nextents = basic_static_extents; - return make_static_strides_first_order(e, nextents{}); - } - } - - template - constexpr auto make_static_strides_last_order( [[maybe_unused]] E const& e, [[maybe_unused]] basic_static_extents const& res ){ - if constexpr( I >= E::_size - 1ul ){ - return impl::extents_to_array_v< basic_static_extents >; - }else{ - using res_type = basic_static_extents; - - constexpr auto J = E::_size - I - 1ul; - constexpr auto K = res_type::_size - I - 1ul; - constexpr auto prod = E::template get().value * res_type::template get().value; - using nextents = basic_static_extents; - return make_static_strides_last_order(e, nextents{}); - } - } - - template - constexpr auto make_static_strides( [[maybe_unused]] E const& e ){ - using value_type = typename E::value_type; - if constexpr( E::_size == 0 ){ - return 
impl::extents_to_array_v; - }else if constexpr( is_scalar(E{}) || is_vector(E{}) ){ - using extents_with_ones = make_sequence_of_ones_t; - return impl::extents_to_array_v; - }else{ - if constexpr( std::is_same_v ){ - return make_static_strides_first_order(e, basic_static_extents{}); - }else{ - return make_static_strides_last_order(e, basic_static_extents{}); - } - } - } - - // It is use for first order to - // get std::array containing strides - template - inline static constexpr auto strides_helper_v = make_static_strides(ExtentsType{}); - -} // namespace boost::numeric::ublas::detail - -namespace boost::numeric::ublas -{ -/** @brief Partial Specialization for layout::first_order or column_major - * - * @code basic_static_strides, layout::first_order> s @endcode - * - * @tparam R rank of basic_static_extents - * @tparam Extents paramerter pack of extents - * - */ -template -class basic_static_strides, Layout> -{ - -public: - - static constexpr std::size_t const _size = sizeof...(Extents); - - using layout_type = Layout; - using extents_type = basic_static_extents; - using base_type = std::array; - using value_type = typename base_type::value_type; - using reference = typename base_type::reference; - using const_reference = typename base_type::const_reference; - using size_type = typename base_type::size_type; - using const_pointer = typename base_type::const_pointer; - using const_iterator = typename base_type::const_iterator; - using const_reverse_iterator = typename base_type::const_reverse_iterator; - - /** - * @param k pos of extent - * @returns the element at given pos - */ - [[nodiscard]] inline - constexpr const_reference at(size_type k) const - { - return m_data.at(k); - } - - [[nodiscard]] inline - constexpr const_reference operator[](size_type k) const { return m_data[k]; } - - //@returns the rank of basic_static_extents - [[nodiscard]] inline - constexpr size_type size() const noexcept { return static_cast(_size); } - - [[nodiscard]] inline - constexpr const_reference back () const{ - return m_data.back(); - } - - // default constructor - constexpr basic_static_strides() noexcept{ - static_assert( - _size == 0 || - ( is_valid(extents_type{}) && - ( is_vector(extents_type{}) || - is_scalar(extents_type{}) || - _size >= 2 - ) - ) - , - "Error in boost::numeric::ublas::basic_static_strides() : " - "Size cannot be 0 or Shape should be valid and shape can be vector or shape can be scalar or size should be greater than" - " or equal to 2" - ); - - - } - - constexpr basic_static_strides(extents_type const& e) noexcept{ (void)e; }; - - // default copy constructor - constexpr basic_static_strides(basic_static_strides const &other) noexcept = default; - constexpr basic_static_strides(basic_static_strides &&other) noexcept = default; - - // default assign constructor - constexpr basic_static_strides & - operator=(basic_static_strides const &other) noexcept = default; - - constexpr basic_static_strides & - operator=(basic_static_strides &&other) noexcept = default; - - ~basic_static_strides() = default; - - /** @brief Returns ref to the std::array containing extents */ - [[nodiscard]] inline - constexpr auto const& base() const noexcept{ - return m_data; - } - - /** @brief Returns pointer to the std::array containing extents */ - [[nodiscard]] inline - constexpr const_pointer data() const noexcept{ - return m_data.data(); - } - - [[nodiscard]] inline - constexpr const_iterator begin() const noexcept{ - return m_data.begin(); - } - - [[nodiscard]] inline - constexpr const_iterator end() 
const noexcept{ - return m_data.end(); - } - - [[nodiscard]] inline - constexpr bool empty() const noexcept{ - return m_data.empty(); - } - - [[nodiscard]] inline - constexpr const_reverse_iterator - rbegin() const noexcept - { - return m_data.rbegin(); - } - - [[nodiscard]] inline - constexpr const_reverse_iterator - rend() const noexcept - { - return m_data.rend(); - } - -private: - static constexpr base_type const m_data{ detail::strides_helper_v }; -}; - -} // namespace boost::numeric::ublas - -#endif diff --git a/include/boost/numeric/ublas/tensor/strides.hpp b/include/boost/numeric/ublas/tensor/strides.hpp deleted file mode 100644 index eb78c24da..000000000 --- a/include/boost/numeric/ublas/tensor/strides.hpp +++ /dev/null @@ -1,99 +0,0 @@ -// -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// -/// \file strides.hpp Definition for the basic_strides template class - -#ifndef _BOOST_UBLAS_TENSOR_STRIDES_HPP_ -#define _BOOST_UBLAS_TENSOR_STRIDES_HPP_ - -#include -#include -#include - -namespace boost::numeric::ublas{ - - template && is_strides_v - , int> = 0 - > - [[nodiscard]] inline - constexpr bool operator==(LStrides const& lhs, RStrides const& rhs) noexcept{ - static_assert( std::is_same_v, - "boost::numeric::ublas::operator==(LStrides,RStrides) : LHS value type should be the same as the RHS value type"); - - return lhs.size() == rhs.size() && std::equal(lhs.begin(), lhs.end(), rhs.begin()); - } - - template && is_strides_v - , int> = 0 - > - [[nodiscard]] inline - constexpr bool operator!=(LStrides const& lhs, RStrides const& rhs) noexcept{ - static_assert( std::is_same_v, - "boost::numeric::ublas::operator!=(LStrides,RStrides) : LHS value type should be the same as the RHS value type"); - return !( lhs == rhs ); - } - -} // namespace boost::numeric::ublas - - -namespace boost::numeric::ublas::detail { - - /** @brief Returns relative memory index with respect to a multi-index - * - * @code auto j = access(std::vector{3,4,5}, strides{shape{4,2,3},first_order}); @endcode - * - * @param[in] i multi-index of length p - * @param[in] w stride vector of length p - * @returns relative memory location depending on \c i and \c w - */ - template - [[nodiscard]] inline - constexpr auto access(std::vector const& i, Stride const& w) - { - static_assert( is_strides_v, - "boost::numeric::ublas::detail::access() : invalid type, the type should be a strides"); - - using value_type = typename Stride::value_type; - return std::inner_product(i.begin(), i.end(), w.begin(), value_type{}); - } - - /** @brief Returns relative memory index with respect to a multi-index - * - * @code auto j = access(strides{shape{4,2,3},first_order}, 2,3,4); @endcode - * - * @param[in] is the elements of the partial multi-index - * @param[in] sum the current relative memory index - * @returns relative memory location depending on \c i and \c w - */ - template - [[nodiscard]] inline - constexpr auto access(Stride const& w, Indices ... 
is) - { - static_assert( is_strides_v, - "boost::numeric::ublas::detail::access() : invalid type, the type should be a strides"); - - if constexpr( is_static_rank_v ){ - static_assert( Stride::_size >= sizeof...(is), - "boost::numeric::ublas::detail::access() : number of indices exceeds the size of the stride"); - } - - using value_type = typename Stride::value_type; - std::array i = {is...}; - return std::inner_product(i.begin(), i.end(), w.begin(), value_type{}); - } - -} // namespace boost::numeric::ublas::detail - -#endif diff --git a/include/boost/numeric/ublas/tensor/tags.hpp b/include/boost/numeric/ublas/tensor/tags.hpp index 079c2e783..7774f9ccb 100644 --- a/include/boost/numeric/ublas/tensor/tags.hpp +++ b/include/boost/numeric/ublas/tensor/tags.hpp @@ -1,17 +1,15 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2020, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany // -#ifndef BOOST_UBLAS_TENSOR_TAGS_IMPL_HPP -#define BOOST_UBLAS_TENSOR_TAGS_IMPL_HPP +#ifndef BOOST_UBLAS_TENSOR_TAGS_HPP +#define BOOST_UBLAS_TENSOR_TAGS_HPP namespace boost::numeric::ublas{ @@ -25,7 +23,7 @@ namespace boost::numeric::ublas{ struct storage_non_seq_container_tag{}; -} // namespace boost::numeric::ublas::tag +} // namespace boost::numeric::ublas -#endif +#endif // BOOST_UBLAS_TENSOR_TAGS_HPP diff --git a/include/boost/numeric/ublas/tensor/tensor.hpp b/include/boost/numeric/ublas/tensor/tensor.hpp index f635f4fbe..02ceaa53a 100644 --- a/include/boost/numeric/ublas/tensor/tensor.hpp +++ b/include/boost/numeric/ublas/tensor/tensor.hpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at @@ -10,46 +10,13 @@ // Google and Fraunhofer IOSB, Ettlingen, Germany // -#ifndef BOOST_UBLAS_TENSOR_IMPL_HPP -#define BOOST_UBLAS_TENSOR_IMPL_HPP +#ifndef BOOST_UBLAS_TENSOR_TENSOR_HPP +#define BOOST_UBLAS_TENSOR_TENSOR_HPP -#include -#include +#include "tensor/tensor_core.hpp" +#include "tensor/tensor_dynamic.hpp" +#include "tensor/tensor_engine.hpp" +#include "tensor/tensor_static_rank.hpp" +#include "tensor/tensor_static.hpp" -namespace boost::numeric::ublas{ - - template - using dynamic_tensor = tensor_core< - tensor_engine< - extents<>, - Layout, - strides< extents<> >, - std::vector< ValueType, std::allocator > - > - >; - - - template - using static_tensor = tensor_core< - tensor_engine< - ExtentsType, - Layout, - strides, - std::array< ValueType, product(ExtentsType{}) > - > - >; - - template - using fixed_rank_tensor = tensor_core< - tensor_engine< - extents, - Layout, - strides< extents >, - std::vector< ValueType, std::allocator > - > - >; - -} // namespace boost::numeric::ublas - - -#endif +#endif // BOOST_UBLAS_TENSOR_TENSOR_HPP diff --git a/include/boost/numeric/ublas/tensor/tensor/tensor_core.hpp b/include/boost/numeric/ublas/tensor/tensor/tensor_core.hpp new file mode 100644 index 000000000..43591af63 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/tensor/tensor_core.hpp @@ -0,0 +1,27 @@ +// +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2020, Cem Bassoy, cem.bassoy@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + + +/// \file tensor_core.hpp Definition for the tensor template class + +#ifndef BOOST_UBLAS_TENSOR_CORE_HPP +#define BOOST_UBLAS_TENSOR_CORE_HPP + + +namespace boost::numeric::ublas { + +template +class tensor_core; + +} // namespace boost::numeric::ublas + +#endif // BOOST_UBLAS_TENSOR_CORE_HPP diff --git a/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp b/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp new file mode 100644 index 000000000..ec27296a6 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp @@ -0,0 +1,466 @@ +// +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + + +/// \file tensor_core.hpp Definition for the tensor template class + +#ifndef BOOST_UBLAS_TENSOR_DYNAMIC_HPP +#define BOOST_UBLAS_TENSOR_DYNAMIC_HPP + +#include + +#include "../algorithms.hpp" +#include "../expression.hpp" +#include "../expression_evaluation.hpp" +#include "../extents.hpp" +#include "../index.hpp" +#include "../index_functions.hpp" +#include "../layout.hpp" +#include "../type_traits.hpp" +#include "../tags.hpp" +#include "../concepts.hpp" + +#include "tensor_engine.hpp" + + +namespace boost::numeric::ublas { + +template +using engine_tensor_dynamic = tensor_engine, L, std::vector>; + +template + class tensor_core> + : public detail::tensor_expression< + tensor_core>, + tensor_core>> +{ +public: + using engine_type = engine_tensor_dynamic; + using self_type = tensor_core; + + template + using tensor_expression_type = detail::tensor_expression; + template + using matrix_expression_type = matrix_expression; + template + using vector_expression_type = vector_expression; + + using super_type = tensor_expression_type; + + using container_type = typename engine_type::container_type; + using layout_type = typename engine_type::layout_type; + using extents_type = typename engine_type::extents_type; + using strides_type = typename extents_type::base_type; + + using container_traits_type = container_traits; + + using size_type = typename container_traits_type::size_type; + using difference_type = typename container_traits_type::difference_type; + using value_type = typename container_traits_type::value_type; + + using reference = typename container_traits_type::reference; + using const_reference = typename container_traits_type::const_reference; + + using pointer = typename container_traits_type::pointer; + using const_pointer = typename container_traits_type::const_pointer; + + using iterator = typename container_traits_type::iterator; + using const_iterator = typename container_traits_type::const_iterator; + + using reverse_iterator = typename container_traits_type::reverse_iterator; + using const_reverse_iterator = typename container_traits_type::const_reverse_iterator; + + using container_tag = typename container_traits_type::container_tag; + using resizable_tag = typename container_traits_type::resizable_tag; + + using matrix_type = matrix >; + using vector_type = vector >; + + explicit tensor_core () = default; + + /** @brief Constructs a tensor_core with a \c shape + * + * @code auto t = tensor{3,4,2}; @endcode + * + */ + template + explicit inline tensor_core (Is ... 
is) + : tensor_expression_type{} + , _extents{size_type(is)...} + , _strides(ublas::to_strides(_extents,layout_type{})) + , _container(ublas::product(_extents)) + { + } + + + /** @brief Constructs a tensor_core with a \c shape + * + * @code auto t = tensor(extents{3,4,2}); @endcode + * + */ + explicit inline tensor_core (extents_type e) + : tensor_expression_type{} + , _extents(std::move(e)) + , _strides(ublas::to_strides(_extents,layout_type{})) + , _container(ublas::product(_extents)) + { + } + + /** @brief Constructs a tensor_core with a \c shape and initial value + * + * @code auto t = tensor(extents<>{4,3,2},5); @endcode + * + * @param i value with which the tensor_core is initialized + */ + inline tensor_core (extents_type e, value_type i) + : tensor_expression_type{} + , _extents(std::move(e)) + , _strides(to_strides(_extents,layout_type{})) + , _container(product(_extents),i) + { + } + + /** @brief Constructs a tensor_core with a \c shape and initializes it with one-dimensional data + * + * @code auto t = tensor(extents<>{3,4,2},std::vector(3*4*2,1.f)); @endcode + * + * @param e instance of \c extents<> specifying the dimensions of tensor + * @param a instance of \c std::vector to be copied + */ + inline tensor_core (extents_type e, container_type a) + : tensor_expression_type{} + , _extents(std::move(e)) + , _strides(ublas::to_strides(_extents,layout_type{})) + , _container(std::move(a)) + { + if(std::size(_container) != ublas::product(_extents)){ + throw std::invalid_argument("boost::numeric::ublas::tensor_core: " + "Cannot construct tensor with specified std::vector instance. " + "Number of extents and std::vector size do not match."); + } + } + + + /** @brief Constructs a tensor_core with another tensor_core with a different layout + * + * @param other tensor_core with a different layout to be copied. + */ + template + explicit inline tensor_core (const tensor_core &other) + : tensor_expression_type{} + , _extents (ublas::begin(other.extents ()), ublas::end (other.extents ())) + , _strides (ublas::begin(other.extents ()), ublas::end (other.extents ())) + , _container( std::begin(other.container()), std::end (other.container())) + { + } + + + /** @brief Constructs a tensor_core with a tensor_core expression + * + * @code tensor_core A = B + 3 * C; @endcode + * + * @note the type of the tensor_core must be specified. + * @note dimension extents are extracted from tensors within the expression.
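+ * @note illustrative sketch only (the names B and C are assumptions, using the tensor_dynamic alias defined at the end of this file):
+ * @code
+ *   auto B = tensor_dynamic<float>{3,4,2};
+ *   auto C = tensor_dynamic<float>{3,4,2};
+ *   tensor_dynamic<float> A = B + 3*C;   // extents {3,4,2} are taken from the expression operands
+ * @endcode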
+ * + * @param expr tensor_core expression + * @param size tensor_core expression + */ + template + // NOLINTNEXTLINE(hicpp-explicit-conversions) + inline tensor_core (detail::tensor_expression const& expr) + : tensor_expression_type{} + , _extents (ublas::detail::retrieve_extents(expr)) + , _strides (ublas::to_strides(_extents,layout_type{})) + , _container(ublas::product(_extents)) + { + detail::eval(*this, expr); + } + + // NOLINTNEXTLINE(hicpp-explicit-conversions) + explicit tensor_core( matrix_type const& m ) + : tensor_expression_type{} + , _extents {m.size1(),m.size2()} + , _strides (ublas::to_strides(_extents,layout_type{})) + , _container(m.data().begin(), m.data().end()) + { + } + + // NOLINTNEXTLINE(hicpp-explicit-conversions) + explicit tensor_core (vector_type const& v) + : tensor_expression_type{} + , _extents {v.size(),1} + , _strides (ublas::to_strides(_extents,layout_type{})) + , _container(v.data().begin(), v.data().end()) + { + } + + /** @brief Constructs a tensor_core with a matrix expression + * + * @code tensor_core A = B + 3 * C; @endcode + * + * @note matrix expression is evaluated and pushed into a temporary matrix before assignment. + * @note extents are automatically extracted from the temporary matrix + * + * @param expr matrix expression + */ + template + // NOLINTNEXTLINE(hicpp-explicit-conversions) + inline tensor_core (const matrix_expression_type &expr) + : tensor_core(matrix_type(expr)) + { + } + + /** @brief Constructs a tensor_core with a vector expression + * + * @code tensor_core A = b + 3 * b; @endcode + * + * @note matrix expression is evaluated and pushed into a temporary matrix before assignment. + * @note extents are automatically extracted from the temporary matrix + * + * @param expr vector expression + */ + template + // NOLINTNEXTLINE(hicpp-explicit-conversions) + inline tensor_core (const vector_expression_type &expr) + : tensor_core( vector_type ( expr ) ) + { + } + + + /** @brief Constructs a tensor_core from another tensor_core + * + * @param t tensor_core to be copied. + */ + inline tensor_core (const tensor_core &t) + : tensor_expression_type{} + , _extents (t._extents ) + , _strides (t._strides ) + , _container(t._container) + {} + + + + /** @brief Constructs a tensor_core from another tensor_core + * + * @param t tensor_core to be moved. + */ + inline tensor_core (tensor_core &&t) noexcept + : tensor_expression_type{} + , _extents (std::move(t._extents )) + , _strides (std::move(t._strides )) + , _container(std::move(t._container)) + {} + + /// @brief Default destructor + ~tensor_core() = default; + + /** @brief Evaluates the tensor_expression and assigns the results to the tensor_core + * + * @code A = B + C * 2; @endcode + * + * @note rank and dimension extents of the tensors in the expressions must conform with this tensor_core. + * + * @param expr expression that is evaluated. + */ + template + tensor_core &operator = (const tensor_expression_type &expr) + { + detail::eval(*this, expr); + return *this; + } + + // NOLINTNEXTLINE(cppcoreguidelines-special-member-functions,hicpp-special-member-functions) + tensor_core& operator=(tensor_core other) noexcept + { + swap (*this, other); + return *this; + } + + tensor_core& operator=(const_reference v) + { + std::fill_n(_container.begin(), _container.size(), v); + return *this; + } + + /** @brief Element access using a multi-index with bound checking which can throw an exception. 
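+ * @note the number of indices must equal \c order(); otherwise \c std::invalid_argument is thrown (illustrative sketch, assuming a rank-3 tensor A):
+ * @code
+ *   auto x = A.at(0,1,2);   // ok
+ *   // A.at(0,1);           // throws std::invalid_argument: two indices, but order() == 3
+ * @endcode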
+ * + * @code auto a = A.at(i,j,k); @endcode + * + * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0) + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() + */ + template + [[nodiscard]] inline const_reference at (I1 i1, I2 i2, Is ... is) const + { + if(sizeof...(is)+2 != this->order()){ + throw std::invalid_argument("boost::numeric::ublas::tensor_core::at : " + "Cannot access tensor with multi-index. " + "Number of provided indices does not match with tensor order."); + } + const auto idx = ublas::detail::to_index(_strides,i1,i2,is...); + return _container.at(idx); + } + + /** @brief Element access using a multi-index with bound checking which can throw an exception. + * + * @code auto a = A.at(i,j,k); @endcode + * + * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0) + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() + */ + template + [[nodiscard]] inline reference at (I1 i1, I2 i2, Is ... is) + { + if(sizeof...(is)+2 != this->order()){ + throw std::invalid_argument("boost::numeric::ublas::tensor_core::at : " + "Cannot access tensor with multi-index." + "Number of provided indices does not match with tensor order."); + } + const auto idx = ublas::detail::to_index(_strides,i1,i2,is...); + return _container.at(idx); + } + + /** @brief Element access using a multi-index with bound checking which can throw an exception. + * + * @code auto a = A(i,j,k); @endcode + * + * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0) + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() + */ + template + [[nodiscard]] inline const_reference operator()(Is ... is) const + { + return this->at(is...); + } + + /** @brief Element access using a multi-index with bound checking which can throw an exception. + * + * @code auto a = A(i,j,k); @endcode + * + * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0) + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() + */ + template + [[nodiscard]] inline reference operator()(Is ... is) + { + return this->at(is...); + } + + /** @brief Element access using a single index. + * + * @code auto a = A[i]; @endcode + * + * @param i zero-based index where 0 <= i < this->size() + */ + [[nodiscard]] inline const_reference operator [] (size_type i) const { + return this->_container[i]; + } + + /** @brief Element access using a single index. + * + * @code auto a = A[i]; @endcode + * + * @param i zero-based index where 0 <= i < this->size() + */ + [[nodiscard]] inline reference operator [] (size_type i) { + return this->_container[i]; + } + + /** @brief Element access using a single-index with bound checking which can throw an exception. 
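+ * @note the single index addresses the underlying container directly, i.e. elements are visited in the storage order defined by \c layout_type (illustrative sketch, assuming a tensor A of floats):
+ * @code
+ *   auto sum = 0.0f;
+ *   for(std::size_t i = 0u; i < A.size(); ++i) { sum += A.at(i); }   // storage-order traversal
+ * @endcode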
+ * + * @code auto a = A.at(i); @endcode + * + * @param i zero-based index where 0 <= i < this->size() + */ + template + [[nodiscard]] inline const_reference at (size_type i) const { + return this->_container.at(i); + } + + /** @brief Read tensor element of a tensor \c t with a single-index \c i + * + * @code auto a = t.at(i); @endcode + * + * @param i zero-based index where 0 <= i < t.size() + */ + [[nodiscard]] inline reference at (size_type i) { + return this->_container.at(i); + } + + /** @brief Generates a tensor_core index for tensor_core contraction + * + * + * @code auto Ai = A(_i,_j,k); @endcode + * + * @param i placeholder + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() + */ + template + [[nodiscard]] inline decltype(auto) operator() (index::index_type p, index_types ... ps) const + { + constexpr auto size = sizeof...(ps)+1; + if(size != this->order()){ + throw std::invalid_argument("boost::numeric::ublas::tensor_core : " + "Cannot multiply using Einstein notation. " + "Number of provided indices does not match with tensor order."); + } + return std::make_pair( std::cref(*this), std::make_tuple( p, std::forward(ps)... ) ); + } + + friend void swap(tensor_core& lhs, tensor_core& rhs) + { + std::swap(lhs._extents , rhs._extents); + std::swap(lhs._strides , rhs._strides); + std::swap(lhs._container , rhs._container); + } + + + [[nodiscard]] inline auto begin () const noexcept -> const_iterator { return _container.begin (); } + [[nodiscard]] inline auto end () const noexcept -> const_iterator { return _container.end (); } + [[nodiscard]] inline auto begin () noexcept -> iterator { return _container.begin (); } + [[nodiscard]] inline auto end () noexcept -> iterator { return _container.end (); } + [[nodiscard]] inline auto cbegin () const noexcept -> const_iterator { return _container.cbegin (); } + [[nodiscard]] inline auto cend () const noexcept -> const_iterator { return _container.cend (); } + [[nodiscard]] inline auto crbegin() const noexcept -> const_reverse_iterator { return _container.crbegin(); } + [[nodiscard]] inline auto crend () const noexcept -> const_reverse_iterator { return _container.crend (); } + [[nodiscard]] inline auto rbegin () const noexcept -> const_reverse_iterator { return _container.rbegin (); } + [[nodiscard]] inline auto rend () const noexcept -> const_reverse_iterator { return _container.rend (); } + [[nodiscard]] inline auto rbegin () noexcept -> reverse_iterator { return _container.rbegin (); } + [[nodiscard]] inline auto rend () noexcept -> reverse_iterator { return _container.rend (); } + + [[nodiscard]] inline auto empty () const noexcept { return _container.empty(); } + [[nodiscard]] inline auto size () const noexcept { return _container.size(); } + [[nodiscard]] inline auto size (size_type r) const { return _extents.at(r); } + [[nodiscard]] inline auto rank () const { return _extents.size(); } + [[nodiscard]] inline auto order () const { return this->rank(); } + + [[nodiscard]] inline auto const& strides () const noexcept { return _strides; } + [[nodiscard]] inline auto const& extents () const noexcept { return _extents; } + [[nodiscard]] inline auto data () const noexcept -> const_pointer { return _container.data();} + [[nodiscard]] inline auto data () noexcept -> pointer { return _container.data();} + [[nodiscard]] inline auto const& base () const noexcept { return _container; } + +private: + extents_type _extents; + strides_type _strides; + container_type _container; +}; + +} // namespace 
boost::numeric::ublas + +namespace boost::numeric::ublas{ +template +using tensor_dynamic = tensor_core, L, std::vector>>; +} // namespace boost::numeric::ublas + + +#endif + diff --git a/include/boost/numeric/ublas/tensor/tensor/tensor_engine.hpp b/include/boost/numeric/ublas/tensor/tensor/tensor_engine.hpp new file mode 100644 index 000000000..dc6cbd790 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/tensor/tensor_engine.hpp @@ -0,0 +1,29 @@ +// +// Copyright (c) 2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + +#ifndef BOOST_UBLAS_TENSOR_ENGINE_HPP +#define BOOST_UBLAS_TENSOR_ENGINE_HPP + +namespace boost::numeric::ublas{ + +template +struct tensor_engine +{ + using extents_type = E; + using layout_type = L; + using container_type = C; +}; + +} // namespace boost::numeric::ublas + + +#endif diff --git a/include/boost/numeric/ublas/tensor/tensor/tensor_static.hpp b/include/boost/numeric/ublas/tensor/tensor/tensor_static.hpp new file mode 100644 index 000000000..644ed9c51 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/tensor/tensor_static.hpp @@ -0,0 +1,456 @@ +// +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + + +/// \file tensor_core.hpp Definition for the tensor template class + +#ifndef BOOST_UBLAS_TENSOR_STATIC_HPP +#define BOOST_UBLAS_TENSOR_STATIC_HPP + +#include + +#include "../algorithms.hpp" +#include "../expression.hpp" +#include "../expression_evaluation.hpp" +#include "../extents.hpp" +#include "../index.hpp" +#include "../index_functions.hpp" +#include "../layout.hpp" +#include "../type_traits.hpp" +#include "../tags.hpp" +#include "../concepts.hpp" + +#include "tensor_engine.hpp" + + + + +namespace boost::numeric::ublas::detail +{ +template +using engine_tensor_static = tensor_engine< + extents, L, std::array>> >; +} // namespace boost::numeric::ublas::detail + +namespace boost::numeric::ublas { +template +class tensor_core> + : public detail::tensor_expression< + tensor_core>, + tensor_core>> +{ +public: + using engine_type = detail::engine_tensor_static; + using self_type = tensor_core; + + template + using tensor_expression_type = detail::tensor_expression; + template + using matrix_expression_type = matrix_expression; + template + using vector_expression_type = vector_expression; + + using super_type = tensor_expression_type; + + using container_type = typename engine_type::container_type; + using layout_type = typename engine_type::layout_type; + using extents_type = typename engine_type::extents_type; + using strides_type = typename extents_type::base_type; + + using container_traits_type = container_traits; + + using size_type = typename container_traits_type::size_type; + using difference_type = typename container_traits_type::difference_type; + using value_type = typename container_traits_type::value_type; + + using reference = typename container_traits_type::reference; + using const_reference = typename container_traits_type::const_reference; + 
+ using pointer = typename container_traits_type::pointer; + using const_pointer = typename container_traits_type::const_pointer; + + using iterator = typename container_traits_type::iterator; + using const_iterator = typename container_traits_type::const_iterator; + + using reverse_iterator = typename container_traits_type::reverse_iterator; + using const_reverse_iterator = typename container_traits_type::const_reverse_iterator; + + using container_tag = typename container_traits_type::container_tag; + using resizable_tag = typename container_traits_type::resizable_tag; + + using matrix_type = matrix >; + using vector_type = vector >; + + static_assert(std::tuple_size_v == ublas::product_v); + static_assert(0ul != ublas::product_v); + + /** @brief Constructs a tensor_core. + * + */ + constexpr inline tensor_core () noexcept = default; + + /** @brief Constructs a tensor_core with a \c shape + * + * + * @code tensor> A(4); @endcode + * + * @param v value with which tensor_core is initialized + */ + constexpr explicit inline tensor_core (value_type v) + : tensor_core() + { + std::fill_n(begin(),this->size(),v); + } + + /** @brief Constructs a tensor_core with a \c shape and initiates it with one-dimensional data + * + * @code auto a = tensor>(array); @endcode + * + * @param s initial tensor_core dimension extents + * @param a container of \c array_type that is copied according to the storage layout + */ + constexpr explicit inline tensor_core (container_type a) noexcept + : tensor_expression_type{} + , _container{std::move(a)} + { + } + + + /** @brief Constructs a tensor_core with another tensor_core with a different layout + * + * @param other tensor_core with a different layout to be copied. + */ + template + explicit inline tensor_core (const tensor_core &other) + : tensor_expression_type{} + , _container{} + { + if(_extents != other.extents()){ + throw std::invalid_argument("error in boost::numeric::ublas::tensor_core: extents do not match."); + } + + ublas::copy(this->rank(), this->extents().data(), + this->data(), this->strides().data(), + other.data(), other.strides().data()); + + } + + + /** @brief Constructs a tensor_core with an tensor_core expression + * + * @code tensor_core A = B + 3 * C; @endcode + * + * @note type must be specified of tensor_core must be specified. + * @note dimension extents are extracted from tensors within the expression. 
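+ * @note no memory is allocated here: the expression is evaluated directly into the statically sized \c std::array, so the tensors in the expression are expected to have exactly these compile-time extents (illustrative sketch; B and C are assumed to be static tensors of the same type as A):
+ * @code
+ *   decltype(B) A = B + 3*C;
+ * @endcode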
+ * + * @param expr tensor_core expression + * @param size tensor_core expression + */ + template + // NOLINTNEXTLINE(hicpp-explicit-conversions) + inline tensor_core (const detail::tensor_expression &expr) + : tensor_expression_type{} + , _container{} + { + detail::eval(*this, expr); + } + + // NOLINTNEXTLINE(hicpp-explicit-conversions) + explicit tensor_core( matrix_type const& m ) + { + static_assert(is_matrix_v); + if(m.size1() != std::get<0>(_extents) || m.size2() != std::get<1>(_extents) ){ + throw std::invalid_argument("error in boost::numeric::ublas::tensor_core: matrix and tensor dimensions do not match."); + } + std::copy(m.data().begin(), m.data().end(), this->begin()); + } + + // NOLINTNEXTLINE(hicpp-explicit-conversions) + constexpr explicit tensor_core (vector_type const& v) + { + static_assert(is_vector_v); + + if(v.size() != std::get<0>(_extents) && v.size() != std::get<1>(_extents) ){ + throw std::invalid_argument("error in boost::numeric::ublas::tensor_core: matrix and tensor dimensions do not match."); + } + std::copy(v.data().begin(), v.data().end(), this->begin()); + } + + /** @brief Constructs a tensor_core with a matrix expression + * + * @code tensor_core A = B + 3 * C; @endcode + * + * @note matrix expression is evaluated and pushed into a temporary matrix before assignment. + * @note extents are automatically extracted from the temporary matrix + * + * @param expr matrix expression + */ + template + // NOLINTNEXTLINE(hicpp-explicit-conversions) + inline tensor_core (const matrix_expression_type &expr) + : tensor_core(matrix_type(expr)) + { + } + + /** @brief Constructs a tensor_core with a vector expression + * + * @code tensor_core A = b + 3 * b; @endcode + * + * @note matrix expression is evaluated and pushed into a temporary matrix before assignment. + * @note extents are automatically extracted from the temporary matrix + * + * @param expr vector expression + */ + template + // NOLINTNEXTLINE(hicpp-explicit-conversions) + inline tensor_core (const vector_expression_type &expr) + : tensor_core( vector_type ( expr ) ) + { + } + + + /** @brief Constructs a tensor_core from another tensor_core + * + * @param t tensor_core to be copied. + */ + constexpr inline tensor_core (const tensor_core &t) noexcept + : tensor_expression_type{} + , _container{t._container} + {} + + + + /** @brief Constructs a tensor_core from another tensor_core + * + * @param t tensor_core to be moved. + */ + constexpr inline tensor_core (tensor_core &&t) noexcept + : tensor_expression_type{} + , _container (std::move(t._container)) + {} + + /// @brief Default destructor + ~tensor_core() = default; + + /** @brief Evaluates the tensor_expression and assigns the results to the tensor_core + * + * @code A = B + C * 2; @endcode + * + * @note rank and dimension extents of the tensors in the expressions must conform with this tensor_core. + * + * @param expr expression that is evaluated. + */ + template + tensor_core &operator = (const tensor_expression_type &expr) + { + detail::eval(*this, expr); + return *this; + } + + // NOLINTNEXTLINE(cppcoreguidelines-special-member-functions,hicpp-special-member-functions) + constexpr tensor_core& operator=(tensor_core other) noexcept + { + swap (*this, other); + return *this; + } + + constexpr tensor_core& operator=(const_reference v) noexcept + { + std::fill_n(this->_container.begin(), this->_container.size(), v); + return *this; + } + + /** @brief Element access using a multi-index with bound checking which can throw an exception. 
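+ * @note for this static specialization the number of indices is checked at compile time via \c static_assert against the static extents; the element access itself uses an unchecked subscript into the underlying \c std::array (illustrative sketch, rank-3 tensor A):
+ * @code
+ *   auto x = A.at(0,1,2);   // compiles only if the index count matches the static rank
+ * @endcode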
+ * + * @code auto a = A.at(i,j,k); @endcode + * + * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0) + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() + */ + template + [[nodiscard]] inline const_reference at (I1 i1, I2 i2, Is ... is) const + { + static_assert (sizeof...(is)+2 == ublas::size_v); + const auto idx = ublas::detail::to_index(_strides,i1,i2,is... ); + return _container[idx]; + } + + /** @brief Element access using a multi-index with bound checking which can throw an exception. + * + * @code auto a = A.at(i,j,k); @endcode + * + * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0) + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() + */ + template + [[nodiscard]] inline reference at (I1 i1, I2 i2, Is ... is) + { + static_assert (sizeof...(is)+2 == ublas::size_v); + const auto idx = ublas::detail::to_index(_strides,i1,i2,is... ); + return _container[idx]; + } + + /** @brief Element access using a multi-index with bound checking which can throw an exception. + * + * @code auto a = A(i,j,k); @endcode + * + * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0) + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() + */ + template + [[nodiscard]] inline constexpr const_reference operator()(Is ... is) const + { + return this->at(is...); + } + + /** @brief Element access using a multi-index with bound checking which can throw an exception. + * + * @code auto a = A(i,j,k); @endcode + * + * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0) + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() + */ + template + [[nodiscard]] inline constexpr reference operator()(Is ... is) + { + return this->at(is...); + } + + /** @brief Element access using a single index. + * + * @code auto a = A[i]; @endcode + * + * @param i zero-based index where 0 <= i < this->size() + */ + [[nodiscard]] inline constexpr const_reference operator [] (size_type i) const { + return this->_container[i]; + } + + /** @brief Element access using a single index. + * + * @code auto a = A[i]; @endcode + * + * @param i zero-based index where 0 <= i < this->size() + */ + [[nodiscard]] inline constexpr reference operator [] (size_type i) { + return this->_container[i]; + } + + /** @brief Element access using a single-index with bound checking which can throw an exception. + * + * @code auto a = A.at(i); @endcode + * + * @param i zero-based index where 0 <= i < this->size() + */ + [[nodiscard]] inline constexpr const_reference at (size_type i) const { + return this->_container.at(i); + } + + /** @brief Read tensor element of a tensor \c t with a single-index \c i + * + * @code auto a = t.at(i); @endcode + * + * @param i zero-based index where 0 <= i < t.size() + */ + [[nodiscard]] inline constexpr reference at (size_type i) { + return this->_container.at(i); + } + + /** @brief Generates a tensor_core index for tensor_core contraction + * + * + * @code auto Ai = A(_i,_j,k); @endcode + * + * @param i placeholder + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() + */ + template + [[nodiscard]] inline constexpr decltype(auto) operator() (index::index_type p, index_types ... 
ps) const + { + constexpr auto size = sizeof...(ps)+1; + static_assert(size == ublas::size_v); + return std::make_pair( std::cref(*this), std::make_tuple( p, std::forward(ps)... ) ); + } + + friend void swap(tensor_core& lhs, tensor_core& rhs) + { + std::swap(lhs._container, rhs._container); + } + + + [[nodiscard]] inline constexpr auto begin () const noexcept -> const_iterator { return _container.begin (); } + [[nodiscard]] inline constexpr auto end () const noexcept -> const_iterator { return _container.end (); } + [[nodiscard]] inline constexpr auto begin () noexcept -> iterator { return _container.begin (); } + [[nodiscard]] inline constexpr auto end () noexcept -> iterator { return _container.end (); } + [[nodiscard]] inline constexpr auto cbegin () const noexcept -> const_iterator { return _container.cbegin (); } + [[nodiscard]] inline constexpr auto cend () const noexcept -> const_iterator { return _container.cend (); } + [[nodiscard]] inline constexpr auto crbegin() const noexcept -> const_reverse_iterator { return _container.crbegin(); } + [[nodiscard]] inline constexpr auto crend () const noexcept -> const_reverse_iterator { return _container.crend (); } + [[nodiscard]] inline constexpr auto rbegin () const noexcept -> const_reverse_iterator { return _container.rbegin (); } + [[nodiscard]] inline constexpr auto rend () const noexcept -> const_reverse_iterator { return _container.rend (); } + [[nodiscard]] inline constexpr auto rbegin () noexcept -> reverse_iterator { return _container.rbegin (); } + [[nodiscard]] inline constexpr auto rend () noexcept -> reverse_iterator { return _container.rend (); } + + + [[nodiscard]] inline constexpr auto empty () const noexcept { return _container.empty(); } + [[nodiscard]] inline constexpr auto size () const noexcept { return _container.size(); } + [[nodiscard]] inline constexpr auto size (size_type r) const { return _extents.at(r); } + [[nodiscard]] inline constexpr auto rank () const noexcept { return ublas::size_v; } + [[nodiscard]] inline constexpr auto order () const noexcept { return this->rank(); } + + [[nodiscard]] constexpr inline auto const& strides () const noexcept{ return _strides; } + [[nodiscard]] inline constexpr auto const& extents () const noexcept{ return _extents; } + [[nodiscard]] inline constexpr const_pointer data () const noexcept{ return _container.data();} + [[nodiscard]] inline constexpr pointer data () noexcept{ return _container.data();} + [[nodiscard]] inline constexpr auto const& base () const noexcept{ return _container; } + + + + +private: + static constexpr extents_type _extents = extents_type{}; + static constexpr strides_type _strides = to_strides_v; + container_type _container; +}; + + + +//template +//static constexpr inline auto make_tensor( +// typename tensor_static::base_type && a, +// typename tensor_static::extents_type && /*unused*/, +// typename tensor_static::layout_type && /*unused*/) +//{ +// return tensor_static( a ); +//} + + +} // namespace boost::numeric::ublas + + +namespace boost::numeric::ublas{ + +template +using tensor_static = tensor_core>>>; + +} + +namespace boost::numeric::ublas::experimental +{ +template +using matrix_static = tensor_static, L>; + +template +using vector_static = tensor_static, L>; +} // namespace boost::numeric::ublas::experimental + +#endif + diff --git a/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp b/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp new file mode 100644 index 000000000..fbb5074db --- /dev/null +++ 
b/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp @@ -0,0 +1,473 @@ +// +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + + +/// \file tensor_core.hpp Definition for the tensor template class + +#ifndef BOOST_UBLAS_TENSOR_STATIC_RANK_HPP +#define BOOST_UBLAS_TENSOR_STATIC_RANK_HPP + +#include + +#include "../algorithms.hpp" +#include "../expression.hpp" +#include "../expression_evaluation.hpp" +#include "../extents.hpp" +#include "../index.hpp" +#include "../index_functions.hpp" +#include "../layout.hpp" +#include "../type_traits.hpp" +#include "../tags.hpp" +#include "../concepts.hpp" + +#include "tensor_engine.hpp" + + + +namespace boost::numeric::ublas { + +template +using engine_tensor_static_rank = tensor_engine, L, std::vector>; + +template + class tensor_core> + : public detail::tensor_expression< + tensor_core>, + tensor_core>> +{ +public: + using engine_type = engine_tensor_static_rank; + using self_type = tensor_core; + + template + using tensor_expression_type = detail::tensor_expression; + template + using matrix_expression_type = matrix_expression; + template + using vector_expression_type = vector_expression; + + using super_type = tensor_expression_type; + + using container_type = typename engine_type::container_type; + using layout_type = typename engine_type::layout_type; + using extents_type = typename engine_type::extents_type; + using strides_type = typename extents_type::base_type; + + using container_traits_type = container_traits; + + using size_type = typename container_traits_type::size_type; + using difference_type = typename container_traits_type::difference_type; + using value_type = typename container_traits_type::value_type; + + using reference = typename container_traits_type::reference; + using const_reference = typename container_traits_type::const_reference; + + using pointer = typename container_traits_type::pointer; + using const_pointer = typename container_traits_type::const_pointer; + + using iterator = typename container_traits_type::iterator; + using const_iterator = typename container_traits_type::const_iterator; + + using reverse_iterator = typename container_traits_type::reverse_iterator; + using const_reverse_iterator = typename container_traits_type::const_reverse_iterator; + + using container_tag = typename container_traits_type::container_tag; + using resizable_tag = typename container_traits_type::resizable_tag; + + using matrix_type = matrix >; + using vector_type = vector >; + + tensor_core () = default; + + /** @brief Constructs a tensor_core with a \c shape + * + * @code auto t = tensor(extents<3>{3,4,2}); @endcode + * + */ + explicit inline tensor_core (extents_type e) + : tensor_expression_type{} + , _extents(std::move(e)) + , _strides(ublas::to_strides(_extents,layout_type{})) + , _container(ublas::product(_extents)) + { + } + + /** @brief Constructs a tensor_core with a \c shape + * + * @code auto t = tensor{3,4,2}; @endcode + * + */ + template + explicit inline tensor_core (Is ... 
is) + : tensor_core(extents_type{size_type(is)...}) + { + } + + /** @brief Constructs a tensor_core with a \c shape and initializes it with one-dimensional data + * + * @code auto t = tensor(extents<>{3,4,2},std::vector(3*4*2,1.f)); @endcode + * + * @param e instance of \c extents<> specifying the dimensions of tensor + * @param a instance of \c std::vector to be copied + */ + inline tensor_core (extents_type e, container_type a) + : tensor_expression_type{} + , _extents(std::move(e)) + , _strides(ublas::to_strides(_extents,layout_type{})) + , _container(std::move(a)) + { + if(std::size(_container) != ublas::product(_extents)){ + throw std::length_error("boost::numeric::ublas::tensor_static_rank : " + "Cannot construct tensor with specified container and extents. " + "Number of container elements does not match the specified extents."); + } + } + + /** @brief Constructs a tensor_core with another tensor_core with a different layout + * + * @param other tensor_core with a different layout to be copied. + */ + template + explicit inline tensor_core (const tensor_core &other) + : tensor_expression_type{} + , _extents (ublas::begin(other.extents()),ublas::end (other.extents ())) + , _strides (ublas::to_strides(_extents)) + , _container(std::begin(other.container()),std::end (other.container())) + { + } + + + /** @brief Constructs a tensor_core with a tensor_core expression + * + * @code tensor_core A = B + 3 * C; @endcode + * + * @note the type of the tensor_core must be specified. + * @note dimension extents are extracted from tensors within the expression. + * + * @param expr tensor_core expression + */ + template + // NOLINTNEXTLINE(hicpp-explicit-conversions) + inline tensor_core (detail::tensor_expression const& expr) + : tensor_expression_type{} + , _extents (ublas::detail::retrieve_extents(expr)) + , _strides (ublas::to_strides(_extents,layout_type{})) + , _container(ublas::product(_extents)) + { + detail::eval(*this, expr); + } + + // NOLINTNEXTLINE(hicpp-explicit-conversions) + explicit tensor_core( matrix_type const& m ) + : tensor_expression_type{} + , _extents {m.size1(),m.size2()} + , _strides (ublas::to_strides(_extents,layout_type{})) + , _container(m.data().begin(), m.data().end()) + { + } + + // NOLINTNEXTLINE(hicpp-explicit-conversions) + explicit tensor_core (vector_type const& v) + : tensor_expression_type{} + , _extents {v.size(),1} + , _strides (ublas::to_strides(_extents,layout_type{})) + , _container(v.data().begin(), v.data().end()) + { + } + + /** @brief Constructs a tensor_core with a matrix expression + * + * @code tensor_core A = B + 3 * C; @endcode + * + * @note matrix expression is evaluated and pushed into a temporary matrix before assignment. + * @note extents are automatically extracted from the temporary matrix + * + * @param expr matrix expression + */ + template + // NOLINTNEXTLINE(hicpp-explicit-conversions) + inline tensor_core (const matrix_expression_type &expr) + : tensor_core(matrix_type(expr)) + { + } + + /** @brief Constructs a tensor_core with a vector expression + * + * @code tensor_core A = b + 3 * b; @endcode + * + * @note vector expression is evaluated and pushed into a temporary vector before assignment.
+ * @note extents are automatically extracted from the temporary matrix + * + * @param expr vector expression + */ + template + // NOLINTNEXTLINE(hicpp-explicit-conversions) + inline tensor_core (const vector_expression_type &expr) + : tensor_core( vector_type ( expr ) ) + { + } + + + /** @brief Constructs a tensor_core from another tensor_core + * + * @param t tensor_core to be copied. + */ + inline tensor_core (const tensor_core &t) noexcept + : tensor_expression_type{} + , _extents (t._extents ) + , _strides (t._strides ) + , _container(t._container) + {} + + + + /** @brief Constructs a tensor_core from another tensor_core + * + * @param t tensor_core to be moved. + */ + inline tensor_core (tensor_core &&t) noexcept + : tensor_expression_type{} + , _extents (std::move(t._extents )) + , _strides (std::move(t._strides )) + , _container(std::move(t._container)) + {} + + /// @brief Default destructor + ~tensor_core() = default; + + /** @brief Evaluates the tensor_expression and assigns the results to the tensor_core + * + * @code A = B + C * 2; @endcode + * + * @note rank and dimension extents of the tensors in the expressions must conform with this tensor_core. + * + * @param expr expression that is evaluated. + */ + template + tensor_core &operator = (const tensor_expression_type &expr) + { + detail::eval(*this, expr); + return *this; + } + + // NOLINTNEXTLINE(cppcoreguidelines-special-member-functions,hicpp-special-member-functions) + tensor_core& operator=(tensor_core other) noexcept + { + swap (*this, other); + return *this; + } + + tensor_core& operator=(container_type c) + { + if( c.size() != this->size()){ + throw std::length_error("boost::numeric::ublas::tensor_core: " + "Cannot assign provided container to tensor." + "Number of elements do not match."); + } + _container = std::move(c); + return *this; + } + + tensor_core& operator=(const_reference v) + { + std::fill_n(_container.begin(), _container.size(), v); + return *this; + } + + /** @brief Element access using a multi-index with bound checking which can throw an exception. + * + * @code auto a = A.at(i,j,k); @endcode + * + * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0) + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() + */ + template + [[nodiscard]] inline const_reference at (I1 i1, I2 i2, Is ... is) const + { + static_assert (sizeof...(is)+2 == std::tuple_size_v); + const auto idx = ublas::detail::to_index(_strides,i1,i2,is...); + return _container.at(idx); + } + + /** @brief Element access using a multi-index with bound checking which can throw an exception. + * + * @code auto a = A.at(i,j,k); @endcode + * + * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0) + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() + */ + template + [[nodiscard]] inline reference at (I1 i1, I2 i2, Is ... is) + { + static_assert (sizeof...(Is)+2 == std::tuple_size_v); + const auto idx = ublas::detail::to_index(_strides,i1,i2,is...); + return _container.at(idx); + } + + /** @brief Element access using a multi-index with bound checking which can throw an exception. 
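+ * @note \c operator() forwards to \c at(): the index count is checked at compile time against the static rank, while the element access itself is bounds-checked by the underlying container (illustrative sketch, rank-3 tensor A):
+ * @code
+ *   auto x = A(0,1,2);   // equivalent to A.at(0,1,2)
+ * @endcode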
+ * + * @code auto a = A(i,j,k); @endcode + * + * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0) + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() + */ + template + [[nodiscard]] inline const_reference operator()(Is ... is) const + { + return this->at(is...); + } + + /** @brief Element access using a multi-index with bound checking which can throw an exception. + * + * @code auto a = A(i,j,k); @endcode + * + * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0) + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() + */ + template + [[nodiscard]] inline reference operator()(Is ... is) + { + return this->at(is...); + } + + /** @brief Element access using a single index. + * + * @code auto a = A[i]; @endcode + * + * @param i zero-based index where 0 <= i < this->size() + */ + [[nodiscard]] inline const_reference operator [] (size_type i) const { + return this->_container[i]; + } + + /** @brief Element access using a single index. + * + * @code auto a = A[i]; @endcode + * + * @param i zero-based index where 0 <= i < this->size() + */ + [[nodiscard]] inline reference operator [] (size_type i) { + return this->_container[i]; + } + + /** @brief Element access using a single-index with bound checking which can throw an exception. + * + * @code auto a = A.at(i); @endcode + * + * @param i zero-based index where 0 <= i < this->size() + */ + [[nodiscard]] inline const_reference at (size_type i) const { + return this->_container.at(i); + } + + /** @brief Read tensor element of a tensor \c t with a single-index \c i + * + * @code auto a = t.at(i); @endcode + * + * @param i zero-based index where 0 <= i < t.size() + */ + [[nodiscard]] inline reference at (size_type i) { + return this->_container.at(i); + } + + /** @brief Generates a tensor_core index for tensor_core contraction + * + * + * @code auto Ai = A(_i,_j,k); @endcode + * + * @param i placeholder + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() + */ + template + [[nodiscard]] inline constexpr decltype(auto) operator() (index::index_type p, index_types ... ps) const + { + constexpr auto size = sizeof...(index_types)+1; + static_assert(size == std::tuple_size_v); + return std::make_pair( std::cref(*this), std::make_tuple( p, std::forward(ps)... 
) ); + } + + friend void swap(tensor_core& lhs, tensor_core& rhs) + { + std::swap(lhs._extents , rhs._extents ); + std::swap(lhs._strides , rhs._strides ); + std::swap(lhs._container , rhs._container); + } + + + [[nodiscard]] inline auto begin () const noexcept -> const_iterator { return _container.begin (); } + [[nodiscard]] inline auto end () const noexcept -> const_iterator { return _container.end (); } + [[nodiscard]] inline auto begin () noexcept -> iterator { return _container.begin (); } + [[nodiscard]] inline auto end () noexcept -> iterator { return _container.end (); } + [[nodiscard]] inline auto cbegin () const noexcept -> const_iterator { return _container.cbegin (); } + [[nodiscard]] inline auto cend () const noexcept -> const_iterator { return _container.cend (); } + + [[nodiscard]] inline auto crbegin() const noexcept -> const_reverse_iterator { return _container.crbegin(); } + [[nodiscard]] inline auto crend () const noexcept -> const_reverse_iterator { return _container.crend (); } + [[nodiscard]] inline auto rbegin () const noexcept -> const_reverse_iterator { return _container.rbegin (); } + [[nodiscard]] inline auto rend () const noexcept -> const_reverse_iterator { return _container.rend (); } + [[nodiscard]] inline auto rbegin () noexcept -> reverse_iterator { return _container.rbegin (); } + [[nodiscard]] inline auto rend () noexcept -> reverse_iterator { return _container.rend (); } + + [[nodiscard]] inline auto empty () const noexcept { return _container.empty(); } + [[nodiscard]] inline auto size () const noexcept { return _container.size(); } + [[nodiscard]] inline auto size (size_type r) const { return _extents.at(r); } + [[nodiscard]] inline constexpr auto rank () const noexcept { return std::tuple_size_v; } + [[nodiscard]] inline constexpr auto order () const noexcept { return this->rank(); } + + [[nodiscard]] inline auto const& strides () const noexcept { return _strides; } + [[nodiscard]] inline auto const& extents () const noexcept { return _extents; } + [[nodiscard]] inline auto data () const noexcept -> const_pointer { return _container.data();} + [[nodiscard]] inline auto data () noexcept -> pointer { return _container.data();} + [[nodiscard]] inline auto const& base () const noexcept { return _container; } + + +private: + extents_type _extents; + strides_type _strides; + container_type _container; +}; + +/** @brief Type for create a dynamic tensor instance with dynamic non-resizable extents + * + * @code + * // defines a 4-dimensional tensor type + * // tensor_core,layout::first_order,std::vector>> + * + * using ftensor = tensor_mixed; + * + * // instantiates a 4-dimension + * auto t = ftensor{{5,6,4,3}}; + * + * @endcode + * + * */ + +template +using tensor_static_rank = tensor_core>; + +} // namespace boost::numeric::ublas + + +namespace boost::numeric::ublas::experimental +{ + +template +using matrix = tensor_core>; + +template +using vector = tensor_core>; + +} // namespace boost::numeric::ublas::experimental + + +#endif // BOOST_UBLAS_TENSOR_STATIC_RANK_HPP + diff --git a/include/boost/numeric/ublas/tensor/tensor_core.hpp b/include/boost/numeric/ublas/tensor/tensor_core.hpp deleted file mode 100644 index 609c9e15e..000000000 --- a/include/boost/numeric/ublas/tensor/tensor_core.hpp +++ /dev/null @@ -1,886 +0,0 @@ -// -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// - - -/// \file tensor_core.hpp Definition for the tensor template class - -#ifndef BOOST_UBLAS_TENSOR_CORE_IMPL_HPP -#define BOOST_UBLAS_TENSOR_CORE_IMPL_HPP - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace boost::numeric::ublas { - -template< class T > -class tensor_core: - public detail::tensor_expression< tensor_core,tensor_core > -{ - - using self_type = tensor_core; - -public: - using tensor_traits = T; - - template - using tensor_expression_type = detail::tensor_expression; - - template - using matrix_expression_type = matrix_expression; - - template - using vector_expression_type = vector_expression; - - using super_type = tensor_expression_type; - using storage_traits_type = typename tensor_traits::storage_traits_type; - - using array_type = typename storage_traits_type::array_type; - using layout_type = typename tensor_traits::layout_type; - - - using size_type = typename storage_traits_type::size_type; - using difference_type = typename storage_traits_type::difference_type; - using value_type = typename storage_traits_type::value_type; - - using reference = typename storage_traits_type::reference; - using const_reference = typename storage_traits_type::const_reference; - - using pointer = typename storage_traits_type::pointer; - using const_pointer = typename storage_traits_type::const_pointer; - - using iterator = typename storage_traits_type::iterator; - using const_iterator = typename storage_traits_type::const_iterator; - - using reverse_iterator = typename storage_traits_type::reverse_iterator; - using const_reverse_iterator = typename storage_traits_type::const_reverse_iterator; - - using tensor_temporary_type = self_type; - using storage_category = dense_tag; - using container_tag = typename storage_traits_type::container_tag; - using resizable_tag = typename storage_traits_type::resizable_tag; - - using extents_type = typename tensor_traits::extents_type; - using strides_type = typename tensor_traits::strides_type; - - using matrix_type = matrix >; - using vector_type = vector >; - - /** @brief Constructs a tensor_core. - * - * @note the tensor_core is empty. - * @note the tensor_core needs to reshaped for further use. - * - */ - inline - constexpr tensor_core () - { - if constexpr( is_static_v ){ - auto temp = tensor_core(extents_type{},resizable_tag{}); - swap(*this,temp); - } - } - - constexpr tensor_core( extents_type e, [[maybe_unused]] storage_resizable_container_tag t ) - : tensor_expression_type() - , extents_(std::move(e)) - , strides_(extents_) - , data_( product(extents_) ) - {} - - constexpr tensor_core( extents_type e, [[maybe_unused]] storage_static_container_tag t ) - : tensor_expression_type() - , extents_(std::move(e)) - , strides_(extents_) - { - if ( data_.size() < product(extents_) ){ - throw std::length_error("boost::numeric::ublas::tensor_core(extents_type const&, storage_static_container_tag): " - "size of requested storage exceeds the current container size" - ); - } - } - - /** @brief Constructs a tensor_core with an initializer list for dynamic_extents - * - * By default, its elements are initialized to 0. 
- * - * @code tensor_core A{4,2,3}; @endcode - * - * @param l initializer list for setting the dimension extents of the tensor_core - */ - template> - > - explicit inline - tensor_core (std::initializer_list l) - : tensor_core( std::move( extents_type( std::move(l) ) ), resizable_tag{} ) - {} - - /** @brief Constructs a tensor_core with a \c shape - * - * By default, its elements are initialized to 0. - * - * @code tensor_core A{extents{4,2,3}}; @endcode - * - * @param s initial tensor_core dimension extents - */ - template> - > - explicit inline - tensor_core (extents_type s) - : tensor_core( std::move(s), resizable_tag{} ) - {} - - /** @brief Constructs a tensor_core with a \c shape - * - * By default, its elements are initialized to 0. - * - * @code tensor_core A{extents{4,2,3}}; @endcode - * - * @param s initial tensor_core dimension extents - * @param i initial tensor_core with this value - */ - template> - > - explicit inline - tensor_core (extents_type s, value_type const& i) - : tensor_core( std::move(s), resizable_tag{} ) - { - std::fill(begin(),end(),i); - } - - /** @brief Constructs a tensor_core with a \c shape - * - * By default, its elements are initialized to 0. - * - * @code tensor_core A{}; @endcode - * - * @param i initial tensor_core with this value - */ - template> - > - explicit inline - tensor_core (value_type const& i) - : tensor_core() - { - std::fill(begin(),end(),i); - } - - /** @brief Constructs a tensor_core with a \c shape and initiates it with one-dimensional data - * - * @code tensor_core A{extents{4,2,3}, array }; @endcode - * - * - * @param s initial tensor_core dimension extents - * @param a container of \c array_type that is copied according to the storage layout - */ - template> - > - inline - tensor_core (extents_type s, const array_type &a) - : tensor_core( std::move(s), resizable_tag{} ) - { - if( size() != a.size() ){ - throw std::runtime_error("boost::numeric::ublas::tensor_core(extents_type,array_type): " - "array size mismatch with extents" - ); - } - std::copy(a.begin(),a.end(),begin()); - } - - /** @brief Constructs a tensor_core with a \c shape and initiates it with one-dimensional data - * - * @code tensor_core A{ array }; @endcode - * - * @param a container of \c array_type that is copied according to the storage layout - */ - template> - > - inline - tensor_core (const array_type &a) - : tensor_core() - { - if( size() != a.size() ){ - throw std::runtime_error("boost::numeric::ublas::tensor_core(extents_type,array_type): " - "array size mismatch with extents" - ); - } - std::copy(a.begin(),a.end(),begin()); - } - - - /** @brief Constructs a tensor_core with another tensor_core with a different layout - * - * @param other tensor_core with a different layout to be copied. - */ - template - tensor_core (const tensor_core &other) - : tensor_core( other.extents(), resizable_tag{} ) - { - copy(this->rank(), this->extents().data(), - this->data(), this->strides().data(), - other.data(), other.strides().data()); - - } - - - /** @brief Constructs a tensor_core with an tensor_core expression - * - * @code tensor_core A = B + 3 * C; @endcode - * - * @note type must be specified of tensor_core must be specified. - * @note dimension extents are extracted from tensors within the expression. 
- * - * @param expr tensor_core expression - * @param size tensor_core expression - */ - template - tensor_core (const detail::tensor_expression &expr) - : tensor_core( detail::retrieve_extents(expr), resizable_tag{} ) - { - static_assert(is_valid_tensor_v, - "boost::numeric::ublas::tensor_core(tensor_expression const&) : " - "other_tensor should be a valid tensor type" - ); - - static_assert(std::is_same_v, - "boost::numeric::ublas::tensor_core(tensor_expression const&) : " - "LHS and RHS should have the same value type" - ); - - detail::eval( *this, expr ); - } - - constexpr tensor_core( matrix_type const& v ) - : tensor_core() - { - if constexpr( is_dynamic_v< extents_type > ){ - auto temp = tensor_core(extents_type{v.size1(), v.size2()}); - swap(*this,temp); - } - - if constexpr( is_static_rank_v ){ - static_assert( extents_type::_size == 2ul, - "boost::numeric::ublas::tensor_core(const matrix &v)" - " : the rank of extents is not correct, it should be of the rank 2" - ); - }else{ - if( extents_.size() != 2ul ){ - throw std::runtime_error( - "boost::numeric::ublas::tensor_core(const matrix &v)" - " : the rank of extents is not correct, it should be of the rank 2" - ); - } - } - - - if( extents_[0] != v.size1() || extents_[1] != v.size2() ){ - throw std::runtime_error( - "boost::numeric::ublas::tensor_core(const matrix &v)" - " : please set the extents properly, the extents should contain the row and col of the matrix" - ); - } - - std::copy(v.data().begin(), v.data().end(), data_.begin()); - } - - constexpr tensor_core( matrix_type && v ) - : tensor_core() - { - if constexpr( is_dynamic_v< extents_type > ){ - auto temp = tensor_core(extents_type{v.size1(), v.size2()}); - swap(*this,temp); - } - - if constexpr( is_static_rank_v ){ - static_assert( extents_type::_size == 2ul, - "boost::numeric::ublas::tensor_core(matrix &&v)" - " : the rank of extents is not correct, it should be of the rank 2" - ); - }else{ - if( extents_.size() != 2ul ){ - throw std::runtime_error( - "boost::numeric::ublas::tensor_core(matrix &&v)" - " : the rank of extents is not correct, it should be of the rank 2" - ); - } - } - - if( extents_[0] != v.size1() || extents_[1] != v.size2() ){ - throw std::runtime_error( - "boost::numeric::ublas::tensor_core(matrix &&v)" - " : please set the extents properly, the extents should contain the row and col of the matrix" - ); - } - - std::move(v.data().begin(), v.data().end(),data_.begin()); - } - - constexpr tensor_core (const vector_type &v) - : tensor_core() - { - if constexpr( is_dynamic_v< extents_type > ){ - auto temp = tensor_core(extents_type{ v.size(), typename extents_type::value_type{1} }); - swap(*this,temp); - } - - if constexpr( is_static_rank_v ){ - static_assert( extents_type::_size == 2ul, - "boost::numeric::ublas::tensor_core(const vector_type &v)" - " : the rank of extents is not correct, it should be of the rank 2" - ); - }else{ - if( extents_.size() != 2ul ){ - throw std::runtime_error( - "boost::numeric::ublas::tensor_core(const vector_type &v)" - " : the rank of extents is not correct, it should be of the rank 2" - ); - } - } - - if( extents_[0] != v.size() || extents_[1] != 1ul ){ - throw std::runtime_error( - "boost::numeric::ublas::tensor_core(const vector_type &v)" - " : please set the extents properly, the first extent should be the size of the vector and 1 for the second extent" - ); - } - - std::copy(v.data().begin(), v.data().end(), data_.begin()); - - } - - constexpr tensor_core (vector_type &&v) - : tensor_core() - { - if constexpr( 
is_dynamic_v< extents_type > ){ - auto temp = tensor_core(extents_type{ v.size(), typename extents_type::value_type{1} }); - swap(*this,temp); - } - - if constexpr( is_static_rank_v ){ - static_assert( extents_type::_size == 2ul, - "boost::numeric::ublas::tensor_core(vector_type &&v)" - " : the rank of extents is not correct, it should be of the rank 2" - ); - }else{ - if( extents_.size() != 2ul ){ - throw std::runtime_error( - "boost::numeric::ublas::tensor_core(vector_type &&v)" - " : the rank of extents is not correct, it should be of the rank 2" - ); - } - } - - if( extents_[0] != v.size() || extents_[1] != 1ul ){ - throw std::runtime_error( - "boost::numeric::ublas::tensor_core(vector_type &&v)" - " : please set the extents properly, the first extent should be the size of the vector and 1 for the second extent" - ); - } - - std::move(v.data().begin(), v.data().end(),data_.begin()); - - } - - /** @brief Constructs a tensor_core with a matrix expression - * - * @code tensor_core A = B + 3 * C; @endcode - * - * @note matrix expression is evaluated and pushed into a temporary matrix before assignment. - * @note extents are automatically extracted from the temporary matrix - * - * @param expr matrix expression - */ - template - tensor_core (const matrix_expression_type &expr) - : tensor_core( matrix_type ( expr ) ) - { - } - - /** @brief Constructs a tensor_core with a vector expression - * - * @code tensor_core A = b + 3 * b; @endcode - * - * @note matrix expression is evaluated and pushed into a temporary matrix before assignment. - * @note extents are automatically extracted from the temporary matrix - * - * @param expr vector expression - */ - template - tensor_core (const vector_expression_type &expr) - : tensor_core( vector_type ( expr ) ) - { - } - - - /** @brief Constructs a tensor_core from another tensor_core - * - * @param v tensor_core to be copied. - */ - inline - tensor_core (const tensor_core &v) - : tensor_expression_type() - , extents_ (v.extents_) - , strides_ (v.strides_) - , data_ (v.data_ ) - {} - - - - /** @brief Constructs a tensor_core from another tensor_core - * - * @param v tensor_core to be moved. - */ - inline - tensor_core (tensor_core &&v) noexcept - : tensor_expression_type() //tensor_container () - , extents_ (std::move(v.extents_)) - , strides_ (std::move(v.strides_)) - , data_ (std::move(v.data_ )) - {} - - - /** @brief Move assignsment operator - * - * @param v tensor_core to be moved. - */ - inline - tensor_core& operator=(tensor_core &&v) noexcept - { - swap(*this,v); - return *this; - } - - /// @brief Default destructor - ~tensor_core() = default; - - /** @brief Evaluates the tensor_expression and assigns the results to the tensor_core - * - * @code A = B + C * 2; @endcode - * - * @note rank and dimension extents of the tensors in the expressions must conform with this tensor_core. - * - * @param expr expression that is evaluated. 
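A short sketch of the matrix-conversion and expression constructors described above: the rank-2 extents are taken from the matrix, and a tensor expression is evaluated directly into the new tensor. The tensor_dynamic alias is again an assumption, not part of this diff.

@code
#include <boost/numeric/ublas/matrix.hpp>
#include <boost/numeric/ublas/tensor.hpp>

int main()
{
  namespace ublas = boost::numeric::ublas;

  auto m = ublas::matrix<float>(3, 4, 1.0f);     // 3x4 matrix filled with ones
  auto T = ublas::tensor_dynamic<float>(m);      // rank-2 tensor with extents {3,4}
  auto U = ublas::tensor_dynamic<float>(T + T);  // tensor expression evaluated on construction

  return (T.extents()[0] == 3 && U.size() == T.size()) ? 0 : 1;
}
@endcode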
- */ - template - tensor_core &operator = (const tensor_expression_type &expr) - { - detail::eval(*this, expr); - return *this; - } - - tensor_core& operator=(tensor_core const& other) - { - tensor_core temp(other); - swap (*this, temp); - return *this; - } - - constexpr tensor_core& operator=(const_reference v) - { - std::fill_n(this->begin(), this->size(), v); - return *this; - } - - /** @brief Returns true if the tensor_core is empty (\c size==0) */ - [[nodiscard]] inline - constexpr bool empty () const noexcept{ - return this->data_.empty(); - } - - /** @brief Returns the upper bound or max size of the tensor_core */ - [[nodiscard]] inline - constexpr size_type size() const noexcept{ - return this->data_.size(); - } - - /** @brief Returns the size of the tensor_core */ - [[nodiscard]] inline - constexpr size_type size (size_type r) const { - return this->extents_.at(r); - } - - /** @brief Returns the number of dimensions/modes of the tensor_core */ - [[nodiscard]] inline - constexpr size_type rank () const noexcept{ - return this->extents_.size(); - } - - /** @brief Returns the number of dimensions/modes of the tensor_core */ - [[nodiscard]] inline - constexpr size_type order () const noexcept{ - return this->extents_.size(); - } - - /** @brief Returns the strides of the tensor_core */ - [[nodiscard]] inline - constexpr strides_type const& strides () const noexcept{ - return this->strides_; - } - - /** @brief Returns the extents of the tensor_core */ - [[nodiscard]] inline - constexpr extents_type const& extents () const noexcept{ - return this->extents_; - } - - /** @brief Returns the strides of the tensor_core */ - [[nodiscard]] inline - constexpr strides_type& strides () noexcept{ - return this->strides_; - } - - /** @brief Returns the extents of the tensor_core */ - [[nodiscard]] inline - constexpr extents_type& extents () noexcept{ - return this->extents_; - } - - /** @brief Returns a \c const reference to the container. */ - [[nodiscard]] inline - constexpr const_pointer data () const noexcept{ - return this->data_.data(); - } - - /** @brief Returns a \c const reference to the container. */ - [[nodiscard]] inline - constexpr pointer data () noexcept{ - return this->data_.data(); - } - - /** @brief Returns a \c const reference to the underlying container. */ - [[nodiscard]] inline - constexpr array_type const& base () const noexcept{ - return data_; - } - - /** @brief Returns a reference to the underlying container. */ - [[nodiscard]] inline - constexpr array_type& base () noexcept{ - return data_; - } - - /** @brief Element access using a single index. - * - * @code auto a = A[i]; @endcode - * - * @param i zero-based index where 0 <= i < this->size() - */ - [[nodiscard]] inline - constexpr const_reference operator [] (size_type i) const { - return this->data_[i]; - } - - /** @brief Element access using a single index. - * - * @code auto a = A[i]; @endcode - * - * @param i zero-based index where 0 <= i < this->size() - */ - [[nodiscard]] inline - constexpr reference operator [] (size_type i) { - return this->data_[i]; - } - - /** @brief Element access using a multi-index or single-index with bound checking - * and it throws the exception. 
- * - * @code auto a = A.at(i,j,k); @endcode or - * @code auto a = A.at(i); @endcode - * - * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0) - * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() - */ - template - [[nodiscard]] inline - constexpr const_reference at (size_type i, Indices ... is) const { - if constexpr( sizeof...(is) == 0ul ){ - return this->data_.at(i); - }else{ - if( sizeof...(is) + 1 > strides_.size() ){ - throw std::runtime_error("Error in boost::numeric::ublas::at(size_type, Indices...): " - "number of variadic argument exceeds the strides size." - ); - } - static_assert( - std::conjunction_v< std::is_convertible... >, - "boost::numeric::ublas::tensor_core::at(size_type,Indices...) : " - "provided variadic argument is not convertible to tensor size_type" - ); - using strides_value_type = typename strides_type::value_type; - auto const idx = detail::access(this->strides_, - static_cast(i), - static_cast(is)... - ); - return this->data_.at(idx); - } - } - - /** @brief Element access using a multi-index or single-index with bound checking - * and it throws the exception. - * - * - * @code A.at(i,j,k) = a; @endcode or - * @code A.at(i) = a; @endcode - * - * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0) - * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() - */ - template - [[nodiscard]] inline - constexpr reference at (size_type i, Indices ... is) { - if constexpr( sizeof...(is) == 0ul ){ - return this->data_.at(i); - }else{ - if( sizeof...(is) + 1 > strides_.size() ){ - throw std::runtime_error("Error in boost::numeric::ublas::at(size_type, Indices...): " - "number of variadic argument exceeds the strides size." - ); - } - static_assert( - std::conjunction_v< std::is_convertible... >, - "boost::numeric::ublas::tensor_core::at(size_type,Indices...) : " - "provided variadic argument is not convertible to tensor size_type" - ); - using strides_value_type = typename strides_type::value_type; - auto const idx = detail::access(this->strides_, - static_cast(i), - static_cast(is)... - ); - return this->data_.at(idx); - } - } - - /** @brief Element access using a multi-index or single-index with no bound checking - * and it does not throw. - * - * - * @code auto a = A(i,j,k); @endcode or - * @code auto a = A(i); @endcode - * - * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0) - * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() - */ - template - [[nodiscard]] inline - constexpr const_reference operator() (size_type i, Indices ... is) const { - if constexpr( sizeof...(is) == 0ul ){ - return this->data_[i]; - }else{ - static_assert( - std::conjunction_v< std::is_convertible... >, - "boost::numeric::ublas::tensor_core::operator()(size_type,Indices...) : " - "provided variadic argument is not convertible to tensor size_type" - ); - using strides_value_type = typename strides_type::value_type; - auto const idx = detail::access(this->strides_, - static_cast(i), - static_cast(is)... - ); - return this->data_[idx]; - } - } - - /** @brief Element access using a multi-index or single-index with no bound checking - * and it does not throw. 
- * - * - * @code A(i,j,k) = a; @endcode or - * @code A(i) = a; @endcode - * - * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0) - * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() - */ - template - [[nodiscard]] inline - constexpr reference operator() (size_type i, Indices ... is) { - if constexpr( sizeof...(is) == 0ul ){ - return this->data_[i]; - }else{ - static_assert( - std::conjunction_v< std::is_convertible... >, - "boost::numeric::ublas::tensor_core::operator()(size_type,Indices...) : " - "provided variadic argument is not convertible to tensor size_type" - ); - using strides_value_type = typename strides_type::value_type; - auto const idx = detail::access(this->strides_, - static_cast(i), - static_cast(is)... - ); - return this->data_[idx]; - } - } - - /** @brief Generates a tensor_core index for tensor_core contraction - * - * - * @code auto Ai = A(_i,_j,k); @endcode - * - * @param i placeholder - * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() - */ - template - [[nodiscard]] inline - constexpr decltype(auto) operator() (index::index_type p, index_types ... ps) const - { - constexpr auto N = sizeof...(ps)+1; - if( N != this->rank() ) - throw std::runtime_error("Error in boost::numeric::ublas::operator(index::index_type,index_types&&): " - "size of provided index_types does not match with the rank." - ); - - return std::make_pair( std::cref(*this), std::make_tuple( p, std::forward(ps)... ) ); - } - - - /** @brief Reshapes the basic_tensor - * - * - * (1) @code A.reshape(extents{m,n,o}); @endcode or - * (2) @code A.reshape(extents{m,n,o},4); @endcode - * - * If the size of this smaller than the specified extents than - * default constructed (1) or specified (2) value is appended. - * - * @note rank of the basic_tensor might also change. - * - * @param e extents with which the basic_tensor is reshaped. - * @param v value which is appended if the basic_tensor is enlarged. 
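The element-access overloads above differ only in bounds checking: at() validates the indices and throws on error, while operator[] and operator() access the underlying container directly. A minimal sketch, reusing the assumed dynamic tensor alias from the note above:

@code
#include <boost/numeric/ublas/tensor.hpp>

int main()
{
  namespace ublas = boost::numeric::ublas;

  auto A = ublas::tensor_dynamic<float>{4, 2, 3};

  A.at(0, 1, 2) = 5.0f;        // multi-index access with bounds checking, may throw
  float const x = A(0, 1, 2);  // the same element, read without bounds checking
  float const y = A[0];        // single flat index into the underlying container

  return (x == 5.0f && y == 0.0f) ? 0 : 1;
}
@endcode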
- */ - inline - void reshape (extents_type const& e, value_type v = value_type{}) - { - static_assert(is_dynamic_v && is_dynamic_v, - "Error in boost::numeric::ublas::basic_tensor::reshape(extents_type const&,value_type) : " - "static extents or static strides cannot used inside reshape function" - ); - - this->extents_ = e; - this->strides_ = strides_type(this->extents_); - - auto p = product(extents_); - if constexpr( !std::is_same_v< resizable_tag, storage_resizable_container_tag > ){ - if( p != this->size() ){ - throw std::runtime_error( - "boost::numeric::ublas::basic_tensor::reshape(extents_type const&,value_type) : " - "cannot resize the non-resizable container, change the extents such a way that the product does not change" - ); - } - }else{ - if(p != this->size()) - this->data_.resize (p, v); - } - } - - friend void swap(tensor_core& lhs, tensor_core& rhs){ - std::swap(lhs.data_ , rhs.data_ ); - std::swap(lhs.extents_, rhs.extents_); - std::swap(lhs.strides_, rhs.strides_); - } - - - /// \brief return an iterator on the first element of the tensor_core - [[nodiscard]] inline - constexpr const_iterator begin () const noexcept{ - return data_.begin (); - } - - /// \brief return an iterator on the first element of the tensor_core - [[nodiscard]] inline - constexpr const_iterator cbegin () const noexcept{ - return data_.cbegin (); - } - - /// \brief return an iterator after the last element of the tensor_core - [[nodiscard]] inline - constexpr const_iterator end () const noexcept{ - return data_.end(); - } - - /// \brief return an iterator after the last element of the tensor_core - [[nodiscard]] inline - constexpr const_iterator cend () const noexcept{ - return data_.cend (); - } - - /// \brief Return an iterator on the first element of the tensor_core - [[nodiscard]] inline - constexpr iterator begin () noexcept{ - return data_.begin(); - } - - /// \brief Return an iterator at the end of the tensor_core - [[nodiscard]] inline - constexpr iterator end () noexcept{ - return data_.end(); - } - - /// \brief Return a const reverse iterator before the first element of the reversed tensor_core (i.e. end() of normal tensor_core) - [[nodiscard]] inline - constexpr const_reverse_iterator rbegin () const noexcept{ - return data_.rbegin(); - } - - /// \brief Return a const reverse iterator before the first element of the reversed tensor_core (i.e. end() of normal tensor_core) - [[nodiscard]] inline - constexpr const_reverse_iterator crbegin () const noexcept{ - return data_.crbegin(); - } - - /// \brief Return a const reverse iterator on the end of the reverse tensor_core (i.e. first element of the normal tensor_core) - [[nodiscard]] inline - constexpr const_reverse_iterator rend () const noexcept{ - return data_.rend(); - } - - /// \brief Return a const reverse iterator on the end of the reverse tensor_core (i.e. first element of the normal tensor_core) - [[nodiscard]] inline - constexpr const_reverse_iterator crend () const noexcept{ - return data_.crend(); - } - - /// \brief Return a const reverse iterator before the first element of the reversed tensor_core (i.e. end() of normal tensor_core) - [[nodiscard]] inline - constexpr reverse_iterator rbegin () noexcept{ - return data_.rbegin(); - } - - /// \brief Return a const reverse iterator on the end of the reverse tensor_core (i.e. 
first element of the normal tensor_core) - [[nodiscard]] inline - constexpr reverse_iterator rend () noexcept{ - return data_.rend(); - } - -private: - - extents_type extents_; - strides_type strides_; - array_type data_; -}; - -} // namespaces - -#endif diff --git a/include/boost/numeric/ublas/tensor/tensor_engine.hpp b/include/boost/numeric/ublas/tensor/tensor_engine.hpp deleted file mode 100644 index 8f9293e05..000000000 --- a/include/boost/numeric/ublas/tensor/tensor_engine.hpp +++ /dev/null @@ -1,50 +0,0 @@ -// -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// - -#ifndef BOOST_UBLAS_TENSOR_ENGINE_IMPL_HPP -#define BOOST_UBLAS_TENSOR_ENGINE_IMPL_HPP - -#include - -namespace boost::numeric::ublas{ - - template - struct tensor_engine; - - template - struct tensor_engine{ - using extents_type = ExtentsType; - - static_assert(is_extents_v, - "boost::numeric::ublas::tensor_engine : please provide valid tensor extents type" - ); - - using layout_type = LayoutType; - using strides_type = typename StrideType::template type; - - static_assert(is_strides_v, - "boost::numeric::ublas::tensor_engine : please provide valid tensor layout type" - ); - - using storage_traits_type = storage_traits; - - }; - - template - struct tensor_engine - : tensor_engine< ExtentsType, LayoutType, strides, StorageType > - {}; - -} // namespace boost::numeric::ublas - - -#endif diff --git a/include/boost/numeric/ublas/tensor/traits/basic_type_traits.hpp b/include/boost/numeric/ublas/tensor/traits/basic_type_traits.hpp index 7d75800ba..ba6510194 100644 --- a/include/boost/numeric/ublas/tensor/traits/basic_type_traits.hpp +++ b/include/boost/numeric/ublas/tensor/traits/basic_type_traits.hpp @@ -1,64 +1,31 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com // // Distributed under the Boost Software License, Version 1.0. 
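The begin()/end() and reverse-iterator members shown at the end of the class expose the tensor's flat storage, so standard algorithms can be applied to the elements directly. A minimal sketch under the same assumed alias as in the earlier notes:

@code
#include <numeric>
#include <boost/numeric/ublas/tensor.hpp>

int main()
{
  namespace ublas = boost::numeric::ublas;

  auto A = ublas::tensor_dynamic<float>(ublas::extents<>{4, 2, 3}, 1.0f);

  // Sum all 24 elements through the container-style iterators.
  auto const sum = std::accumulate(A.begin(), A.end(), 0.0f);

  return sum == 24.0f ? 0 : 1;
}
@endcode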
(See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// #ifndef BOOST_UBLAS_TENSOR_BASIC_TYPE_TRAITS_HPP #define BOOST_UBLAS_TENSOR_BASIC_TYPE_TRAITS_HPP #include #include +#include +#include namespace boost::numeric::ublas { - -/** @brief Checks if the extents or strides is dynamic - * - * @tparam E of type basic_extents or basic_static_extents - * - */ -template struct is_dynamic : std::false_type {}; - -template -inline static constexpr bool const is_dynamic_v = is_dynamic::value; - -/** @brief Checks if the extents or strides is static - * - * @tparam E of type basic_extents or basic_static_extents - * - */ -template struct is_static : std::false_type {}; - -template -inline static constexpr bool const is_static_v = is_static::value; -/** @brief Checks if the extents or strides has dynamic rank - * - * @tparam E of type basic_extents or basic_static_extents - * - */ -template -struct is_dynamic_rank : std::false_type {}; -template -inline static constexpr bool const is_dynamic_rank_v = is_dynamic_rank::value; +template +struct is_complex : std::false_type{}; -/** @brief Checks if the extents or strides has static rank - * - * @tparam E of type basic_extents or basic_static_extents - * - */ -template -struct is_static_rank : std::false_type {}; +template +struct is_complex< std::complex > : std::true_type{}; -template -inline static constexpr bool const is_static_rank_v = is_static_rank::value; +template +inline static constexpr bool is_complex_v = is_complex::value; } // namespace boost::numeric::ublas diff --git a/include/boost/numeric/ublas/tensor/traits/storage_traits.hpp b/include/boost/numeric/ublas/tensor/traits/storage_traits.hpp index 4e5619966..e7bef80ae 100644 --- a/include/boost/numeric/ublas/tensor/traits/storage_traits.hpp +++ b/include/boost/numeric/ublas/tensor/traits/storage_traits.hpp @@ -1,52 +1,51 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com + // // Distributed under the Boost Software License, Version 1.0. 
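The is_complex trait added above has a single purpose: it is true only for std::complex specializations. A compile-time check using only names from the code added in this hunk:

@code
#include <complex>
#include <boost/numeric/ublas/tensor/traits/basic_type_traits.hpp>

namespace ublas = boost::numeric::ublas;

static_assert( ublas::is_complex_v<std::complex<float>>);
static_assert(!ublas::is_complex_v<float>);

int main() { return 0; }
@endcode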
(See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany +// The authors gratefully acknowledge the support of Google // #ifndef BOOST_UBLAS_TRAITS_STORAGE_HPP #define BOOST_UBLAS_TRAITS_STORAGE_HPP -#include #include -#include +#include -namespace boost { -namespace numeric { -namespace ublas { +#include "../tags.hpp" +namespace boost::numeric::ublas +{ -template -struct storage_traits; +template +struct container_traits; template -struct storage_traits> +struct container_traits> { - using array_type = std::vector; + using container_type = std::vector; - using size_type = typename array_type::size_type; - using difference_type = typename array_type::difference_type; - using value_type = typename array_type::value_type; + using size_type = typename container_type::size_type; + using difference_type = typename container_type::difference_type; + using value_type = typename container_type::value_type; - using reference = typename array_type::reference; - using const_reference = typename array_type::const_reference; + using reference = typename container_type::reference; + using const_reference = typename container_type::const_reference; - using pointer = typename array_type::pointer; - using const_pointer = typename array_type::const_pointer; + using pointer = typename container_type::pointer; + using const_pointer = typename container_type::const_pointer; - using iterator = typename array_type::iterator; - using const_iterator = typename array_type::const_iterator; + using iterator = typename container_type::iterator; + using const_iterator = typename container_type::const_iterator; - using reverse_iterator = typename array_type::reverse_iterator; - using const_reverse_iterator = typename array_type::const_reverse_iterator; + using reverse_iterator = typename container_type::reverse_iterator; + using const_reverse_iterator = typename container_type::const_reverse_iterator; using container_tag = storage_seq_container_tag; using resizable_tag = storage_resizable_container_tag; @@ -57,25 +56,25 @@ struct storage_traits> template -struct storage_traits> +struct container_traits> { - using array_type = std::array; + using container_type = std::array; - using size_type = typename array_type::size_type; - using difference_type = typename array_type::difference_type; - using value_type = typename array_type::value_type; + using size_type = typename container_type::size_type; + using difference_type = typename container_type::difference_type; + using value_type = typename container_type::value_type; - using reference = typename array_type::reference; - using const_reference = typename array_type::const_reference; + using reference = typename container_type::reference; + using const_reference = typename container_type::const_reference; - using pointer = typename array_type::pointer; - using const_pointer = typename array_type::const_pointer; + using pointer = typename container_type::pointer; + using const_pointer = typename container_type::const_pointer; - using iterator = typename array_type::iterator; - using const_iterator = typename array_type::const_iterator; + using iterator = typename container_type::iterator; + using const_iterator = typename container_type::const_iterator; - using reverse_iterator = typename array_type::reverse_iterator; - using const_reverse_iterator = typename array_type::const_reverse_iterator; + using reverse_iterator = typename 
container_type::reverse_iterator; + using const_reverse_iterator = typename container_type::const_reverse_iterator; using container_tag = storage_seq_container_tag; using resizable_tag = storage_static_container_tag; @@ -87,38 +86,40 @@ struct storage_traits> using rebind_size = std::array; }; -} // ublas -} // numeric -} // boost +} // namespace boost::numeric::ublas namespace boost::numeric::ublas { + +template +class basic_static_extents; + namespace detail{ template struct rebind_storage_size_helper{ using type = A; }; - template - struct rebind_storage_size_helper, A, storage_static_container_tag>{ - using type = typename storage_traits::template rebind_size< E0 * (Es * ...) >; + template + struct rebind_storage_size_helper, C, storage_static_container_tag>{ + using type = typename container_traits::template rebind_size< E0 * (Es * ...) >; }; - template - struct rebind_storage_size_helper, A, storage_static_container_tag>{ - using type = typename storage_traits::template rebind_size< 0 >; + template + struct rebind_storage_size_helper, C, storage_static_container_tag>{ + using type = typename container_traits::template rebind_size< 0 >; }; - } + } //namespace detail - template + template struct rebind_storage_size - : detail::rebind_storage_size_helper::resizable_tag + : detail::rebind_storage_size_helper::resizable_tag > {}; - template - using rebind_storage_size_t = typename rebind_storage_size::type; + template + using rebind_storage_size_t = typename rebind_storage_size::type; } // namespace boost::numeric::ublas diff --git a/include/boost/numeric/ublas/tensor/traits/type_traits_dynamic_extents.hpp b/include/boost/numeric/ublas/tensor/traits/type_traits_dynamic_extents.hpp deleted file mode 100644 index 54a11a471..000000000 --- a/include/boost/numeric/ublas/tensor/traits/type_traits_dynamic_extents.hpp +++ /dev/null @@ -1,46 +0,0 @@ -// -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// - -#ifndef BOOST_UBLAS_TENSOR_TYPE_TRAITS_DYNAMIC_EXTENTS_HPP -#define BOOST_UBLAS_TENSOR_TYPE_TRAITS_DYNAMIC_EXTENTS_HPP - -#include - -namespace boost::numeric::ublas{ - -template class basic_extents; - -} // namespace boost::numeric::ublas - -namespace boost::numeric::ublas{ - - template - struct is_extents< basic_extents > : std::true_type {}; - - template - struct is_dynamic< basic_extents > : std::true_type {}; - - template - struct is_dynamic_rank< basic_extents > : std::true_type {}; - - - namespace detail{ - - template <> struct dynamic_extents_impl<> { - using type = basic_extents; - }; - - } // namespace detail - -} // namespace boost::numeric::ublas - -#endif diff --git a/include/boost/numeric/ublas/tensor/traits/type_traits_dynamic_strides.hpp b/include/boost/numeric/ublas/tensor/traits/type_traits_dynamic_strides.hpp deleted file mode 100644 index 0eaf8ff9e..000000000 --- a/include/boost/numeric/ublas/tensor/traits/type_traits_dynamic_strides.hpp +++ /dev/null @@ -1,47 +0,0 @@ -// -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. 
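The container_traits specializations above classify the storage container through their resizable_tag: std::vector maps to the resizable tag and std::array to the static one, and rebind_storage_size uses exactly that tag to decide whether an array's size must be rebound to the product of the static extents. A compile-time sketch of the classification, using only names that appear in this hunk:

@code
#include <array>
#include <type_traits>
#include <vector>
#include <boost/numeric/ublas/tensor/traits/storage_traits.hpp>

namespace ublas = boost::numeric::ublas;

static_assert(std::is_same_v<
    ublas::container_traits<std::vector<float>>::resizable_tag,
    ublas::storage_resizable_container_tag>);

static_assert(std::is_same_v<
    ublas::container_traits<std::array<float, 24>>::resizable_tag,
    ublas::storage_static_container_tag>);

int main() { return 0; }
@endcode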
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// - -#ifndef BOOST_UBLAS_TENSOR_TYPE_TRAITS_DYNAMIC_STRIDES_HPP -#define BOOST_UBLAS_TENSOR_TYPE_TRAITS_DYNAMIC_STRIDES_HPP - -#include - -namespace boost::numeric::ublas{ - -template class basic_extents; - -template -class basic_strides; - -} // namespace boost::numeric::ublas - -namespace boost::numeric::ublas{ - - template - struct is_strides> : std::true_type {}; - - template - struct is_dynamic< basic_strides > : std::true_type {}; - - template - struct is_dynamic_rank< basic_strides > : std::true_type {}; - - template - struct strides> - { - template - using type = basic_strides; - }; - -} // namespace boost::numeric::ublas - -#endif diff --git a/include/boost/numeric/ublas/tensor/traits/type_traits_extents.hpp b/include/boost/numeric/ublas/tensor/traits/type_traits_extents.hpp deleted file mode 100644 index 14ed34870..000000000 --- a/include/boost/numeric/ublas/tensor/traits/type_traits_extents.hpp +++ /dev/null @@ -1,41 +0,0 @@ -// -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// - -#ifndef BOOST_UBLAS_TENSOR_TYPE_TRAITS_EXTENTS_HPP -#define BOOST_UBLAS_TENSOR_TYPE_TRAITS_EXTENTS_HPP - -namespace boost::numeric::ublas { - -/// @brief checks if the type is tensor extents or not -template -struct is_extents : std::false_type {}; - -template -inline static constexpr bool const is_extents_v = is_extents::value; - -namespace detail{ - - template - struct dynamic_extents_impl; - -} // detail - -template -using extents = typename detail::dynamic_extents_impl::type; - -} // namespace boost::numeric::ublas - -#include -#include -#include - -#endif diff --git a/include/boost/numeric/ublas/tensor/traits/type_traits_fixed_rank_extents.hpp b/include/boost/numeric/ublas/tensor/traits/type_traits_fixed_rank_extents.hpp deleted file mode 100644 index f1cbfba38..000000000 --- a/include/boost/numeric/ublas/tensor/traits/type_traits_fixed_rank_extents.hpp +++ /dev/null @@ -1,45 +0,0 @@ -// -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// - -#ifndef BOOST_UBLAS_TENSOR_TYPE_TRAITS_FIXED_RANK_EXTENTS_HPP -#define BOOST_UBLAS_TENSOR_TYPE_TRAITS_FIXED_RANK_EXTENTS_HPP - -#include - -namespace boost::numeric::ublas{ - -template class basic_fixed_rank_extents; - -} // namespace boost::numeric::ublas - -namespace boost::numeric::ublas{ - - template - struct is_extents< basic_fixed_rank_extents > : std::true_type {}; - - template - struct is_dynamic< basic_fixed_rank_extents > : std::true_type {}; - - template - struct is_static_rank< basic_fixed_rank_extents > : std::true_type {}; - - namespace detail{ - - template struct dynamic_extents_impl { - using type = basic_fixed_rank_extents; - }; - - } // namespace detail - -} // namespace boost::numeric::ublas - -#endif diff --git a/include/boost/numeric/ublas/tensor/traits/type_traits_fixed_rank_strides.hpp b/include/boost/numeric/ublas/tensor/traits/type_traits_fixed_rank_strides.hpp deleted file mode 100644 index 2e378a269..000000000 --- a/include/boost/numeric/ublas/tensor/traits/type_traits_fixed_rank_strides.hpp +++ /dev/null @@ -1,46 +0,0 @@ -// -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// - -#ifndef BOOST_UBLAS_TENSOR_TYPE_TRAITS_FIXED_RANK_STRIDES_HPP -#define BOOST_UBLAS_TENSOR_TYPE_TRAITS_FIXED_RANK_STRIDES_HPP - -#include - -namespace boost::numeric::ublas{ - -template class basic_fixed_rank_extents; - -template class basic_fixed_rank_strides; - -} // namespace boost::numeric::ublas - -namespace boost::numeric::ublas{ - - template - struct is_strides< basic_fixed_rank_strides< T, R, L> > : std::true_type {}; - - template - struct is_dynamic< basic_fixed_rank_strides > : std::true_type {}; - - template - struct is_static_rank< basic_fixed_rank_strides > : std::true_type {}; - - template - struct strides> - { - template - using type = basic_fixed_rank_strides; - }; - -} // namespace boost::numeric::ublas - -#endif diff --git a/include/boost/numeric/ublas/tensor/traits/type_traits_static_extents.hpp b/include/boost/numeric/ublas/tensor/traits/type_traits_static_extents.hpp deleted file mode 100644 index 19662f0ae..000000000 --- a/include/boost/numeric/ublas/tensor/traits/type_traits_static_extents.hpp +++ /dev/null @@ -1,37 +0,0 @@ -// -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// - -#ifndef BOOST_UBLAS_TENSOR_TYPE_TRAITS_STTAIC_EXTENTS_HPP -#define BOOST_UBLAS_TENSOR_TYPE_TRAITS_STTAIC_EXTENTS_HPP - -#include - -namespace boost::numeric::ublas{ - -template class basic_static_extents; - -} // namespace boost::numeric::ublas - -namespace boost::numeric::ublas{ - -template -struct is_extents< basic_static_extents > : std::true_type {}; - -template -struct is_static< basic_static_extents > : std::true_type {}; - -template -struct is_static_rank< basic_static_extents > : std::true_type {}; - -} // namespace boost::numeric::ublas - -#endif diff --git a/include/boost/numeric/ublas/tensor/traits/type_traits_static_strides.hpp b/include/boost/numeric/ublas/tensor/traits/type_traits_static_strides.hpp deleted file mode 100644 index 47137287b..000000000 --- a/include/boost/numeric/ublas/tensor/traits/type_traits_static_strides.hpp +++ /dev/null @@ -1,46 +0,0 @@ -// -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// - -#ifndef BOOST_UBLAS_TENSOR_TYPE_TRAITS_STTAIC_STRIDES_HPP -#define BOOST_UBLAS_TENSOR_TYPE_TRAITS_STTAIC_STRIDES_HPP - -#include - -namespace boost::numeric::ublas{ - -template class basic_static_extents; - -template class basic_static_strides; - -} // namespace boost::numeric::ublas - -namespace boost::numeric::ublas{ - - template - struct is_strides< basic_static_strides< basic_static_extents, L > > : std::true_type {}; - - template - struct is_static< basic_static_strides< basic_static_extents, L > > : std::true_type {}; - - template - struct is_static_rank< basic_static_strides< basic_static_extents, L > > : std::true_type {}; - - template - struct strides> - { - template - using type = basic_static_strides, Layout>; - }; - -} // namespace boost::numeric::ublas - -#endif diff --git a/include/boost/numeric/ublas/tensor/traits/type_traits_strides.hpp b/include/boost/numeric/ublas/tensor/traits/type_traits_strides.hpp deleted file mode 100644 index f31fb67a9..000000000 --- a/include/boost/numeric/ublas/tensor/traits/type_traits_strides.hpp +++ /dev/null @@ -1,44 +0,0 @@ -// -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// - -#ifndef BOOST_UBLAS_TENSOR_TYPE_TRAITS_STRIDES_HPP -#define BOOST_UBLAS_TENSOR_TYPE_TRAITS_STRIDES_HPP - -namespace boost::numeric::ublas { - - /// @brief checks if the type is tensor strides or not - template - struct is_strides : std::false_type {}; - - template - inline static constexpr bool const is_strides_v = is_strides::value; - - template - struct strides; - - /** @brief type alias of result of strides::type - * - * @tparam E extents type either basic_extents or basic_static_extents - * - * @tparam Layout either first_order or last_order - * - */ - template - using strides_t = typename strides::template type; - -} // namespace boost::numeric::ublas - -#include -#include -#include - -#endif diff --git a/include/boost/numeric/ublas/tensor/traits/type_traits_tensor.hpp b/include/boost/numeric/ublas/tensor/traits/type_traits_tensor.hpp deleted file mode 100644 index b2bb161cc..000000000 --- a/include/boost/numeric/ublas/tensor/traits/type_traits_tensor.hpp +++ /dev/null @@ -1,41 +0,0 @@ -// -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// - -#ifndef BOOST_UBLAS_TENSOR_TYPE_TRAITS_TENSOR_HPP -#define BOOST_UBLAS_TENSOR_TYPE_TRAITS_TENSOR_HPP - -#include -#include -#include - -namespace boost::numeric::ublas{ - - template class tensor_core; - -} // namespace boost::numeric::ublas - - -namespace boost::numeric::ublas { - - /// @brief Checks if the type is valid tensor - template - struct is_valid_tensor: std::false_type{}; - - template - struct is_valid_tensor< tensor_core >: std::true_type{}; - - template - inline static constexpr bool is_valid_tensor_v = is_valid_tensor::value; - -} // namespace boost::numeric::ublas - -#endif diff --git a/include/boost/numeric/ublas/tensor/type_traits.hpp b/include/boost/numeric/ublas/tensor/type_traits.hpp index cf1b865c7..bd12bd4b6 100644 --- a/include/boost/numeric/ublas/tensor/type_traits.hpp +++ b/include/boost/numeric/ublas/tensor/type_traits.hpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at @@ -13,10 +13,7 @@ #ifndef BOOST_UBLAS_TENSOR_TYPE_TRAITS_HPP #define BOOST_UBLAS_TENSOR_TYPE_TRAITS_HPP -#include -#include - -#include -#include +#include "traits/basic_type_traits.hpp" +#include "traits/storage_traits.hpp" #endif diff --git a/test/tensor/Jamfile b/test/tensor/Jamfile index 839d3a323..723f5b11a 100644 --- a/test/tensor/Jamfile +++ b/test/tensor/Jamfile @@ -32,35 +32,43 @@ explicit unit_test_framework ; test-suite boost-ublas-tensor-test : - [ run test_tensor.cpp - test_strides.cpp + [ run test_algorithms.cpp + test_einstein_notation.cpp test_expression.cpp - test_operators_comparison.cpp - test_operators_arithmetic.cpp - test_multiplication.cpp - test_multi_index_utility.cpp - test_multi_index.cpp - test_extents.cpp test_expression_evaluation.cpp - test_einstein_notation.cpp - test_algorithms.cpp - test_tensor_matrix_vector.cpp + test_extents_dynamic.cpp + test_extents_dynamic_rank_static.cpp + test_extents_functions.cpp + test_fixed_rank_expression_evaluation.cpp + test_fixed_rank_extents.cpp + test_fixed_rank_functions.cpp + test_fixed_rank_operators_arithmetic.cpp + test_fixed_rank_operators_comparison.cpp + test_fixed_rank_strides.cpp + test_fixed_rank_tensor.cpp + test_fixed_rank_tensor_matrix_vector.cpp test_functions.cpp - test_static_tensor.cpp + test_multi_index.cpp + test_multi_index_utility.cpp + test_multiplication.cpp + test_operators_arithmetic.cpp + test_operators_comparison.cpp + test_static_expression_evaluation.cpp test_static_extents.cpp - test_static_strides.cpp test_static_operators_arithmetic.cpp test_static_operators_comparison.cpp - test_static_expression_evaluation.cpp + test_static_strides.cpp + test_static_tensor.cpp test_static_tensor_matrix_vector.cpp - test_fixed_rank_tensor.cpp - test_fixed_rank_extents.cpp - test_fixed_rank_strides.cpp - test_fixed_rank_operators_arithmetic.cpp - test_fixed_rank_operators_comparison.cpp - test_fixed_rank_expression_evaluation.cpp - test_fixed_rank_tensor_matrix_vector.cpp - test_fixed_rank_functions.cpp - unit_test_framework + test_strides.cpp + test_tensor.cpp + test_tensor_matrix_vector.cpp + unit_test_framework + : + : + : + : test_tensor + : + # ] ; diff --git a/test/tensor/test_algorithms.cpp b/test/tensor/test_algorithms.cpp index 5a11a9c3a..477ee1e0c 100644 --- a/test/tensor/test_algorithms.cpp +++ b/test/tensor/test_algorithms.cpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at @@ -15,42 +15,37 @@ #include #include #include -#include -#include -#include -#include +#include #include "utility.hpp" #include -BOOST_AUTO_TEST_SUITE ( test_tensor_algorithms, - * boost::unit_test::depends_on("test_extents") - * boost::unit_test::depends_on("test_strides")) +BOOST_AUTO_TEST_SUITE ( test_tensor_algorithms/*, + * boost::unit_test::depends_on("test_shape_dynamic") * boost::unit_test::depends_on("test_strides")*/ + ) // BOOST_AUTO_TEST_SUITE ( test_tensor_algorithms) using test_types = zip>::with_t; -using test_types2 = std::tuple>; +using test_types2 = std::tuple>; struct fixture { - using extents_type = boost::numeric::ublas::extents<>; - fixture() - : extents { - extents_type{1,1}, // 1 - extents_type{1,2}, // 2 - extents_type{2,1}, // 3 - extents_type{2,3}, // 4 - extents_type{2,3,1}, // 5 - extents_type{4,1,3}, // 6 - extents_type{1,2,3}, // 7 - extents_type{4,2,3}, // 8 - extents_type{4,2,3,5} } // 9 - { - } - std::vector extents; + using extents_t = boost::numeric::ublas::extents<>; + const std::vector extents = + { + extents_t{1,1}, // 1 + extents_t{1,2}, // 2 + extents_t{2,1}, // 3 + extents_t{2,3}, // 4 + extents_t{2,3,1}, // 5 + extents_t{4,1,3}, // 6 + extents_t{1,2,3}, // 7 + extents_t{4,2,3}, // 8 + extents_t{4,2,3,5} + }; }; @@ -58,401 +53,364 @@ struct fixture BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_algorithms_copy, value, test_types2, fixture ) { - using namespace boost::numeric; - using value_type = value; - using vector_type = std::vector; - + namespace ublas = boost::numeric::ublas; + using value_type = value; + using vector_t = std::vector; - for(auto const& n : extents) { - auto a = vector_type(product(n)); - auto b = vector_type(product(n)); - auto c = vector_type(product(n)); + constexpr auto first_order = ublas::layout::first_order{}; + constexpr auto last_order = ublas::layout::last_order {}; - auto wa = ublas::strides_t,ublas::layout::first_order>(n); - auto wb = ublas::strides_t,ublas::layout::last_order> (n); - auto wc = ublas::strides_t,ublas::layout::first_order>(n); - auto v = value_type{}; - for(auto i = 0ul; i < a.size(); ++i, v+=1){ - a[i]=v; - } + for(auto const& n : extents) { - ublas::copy( n.size(), n.data(), b.data(), wb.data(), a.data(), wa.data() ); - ublas::copy( n.size(), n.data(), c.data(), wc.data(), b.data(), wb.data() ); + auto a = vector_t(product(n)); + auto b = vector_t(product(n)); + auto c = vector_t(product(n)); - for(auto i = 1ul; i < c.size(); ++i) - BOOST_CHECK_EQUAL( c[i], a[i] ); + auto wa = ublas::to_strides(n,first_order); + auto wb = ublas::to_strides(n,last_order ); + auto wc = ublas::to_strides(n,first_order); - using size_type = typename ublas::strides_t,ublas::layout::first_order>::value_type; - size_type const*const p0 = nullptr; - BOOST_CHECK_THROW( ublas::copy( n.size(), p0, c.data(), wc.data(), b.data(), wb.data() ), std::runtime_error ); - BOOST_CHECK_THROW( ublas::copy( n.size(), n.data(), c.data(), p0, b.data(), wb.data() ), std::runtime_error ); - BOOST_CHECK_THROW( ublas::copy( n.size(), n.data(), c.data(), wc.data(), b.data(), p0 ), std::runtime_error ); - - value_type* c0 = nullptr; - BOOST_CHECK_THROW( ublas::copy( n.size(), n.data(), c0, wc.data(), b.data(), wb.data() ), std::runtime_error ); + auto v = value_type{}; + for(auto i = 0ul; i < a.size(); ++i, v+=1){ + a[i]=v; } - // special case rank == 0 - { - auto n = ublas::extents<>{}; + ublas::copy( ublas::size(n), n.data(), b.data(), wb.data(), a.data(), wa.data() ); + ublas::copy( 
ublas::size(n), n.data(), c.data(), wc.data(), b.data(), wb.data() ); - auto a = vector_type(product(n)); - auto b = vector_type(product(n)); - auto c = vector_type(product(n)); + for(auto i = 1ul; i < c.size(); ++i) + BOOST_CHECK_EQUAL( c[i], a[i] ); + std::size_t const*const p0 = nullptr; + value_type* c0 = nullptr; - auto wa = ublas::strides_t,ublas::layout::first_order>(n); - auto wb = ublas::strides_t,ublas::layout::last_order> (n); - auto wc = ublas::strides_t,ublas::layout::first_order>(n); + BOOST_CHECK_THROW( ublas::copy( ublas::size(n), p0, c.data(), wc.data(), b.data(), wb.data() ), std::runtime_error ); + BOOST_CHECK_THROW( ublas::copy( ublas::size(n), n.data(), c.data(), p0, b.data(), wb.data() ), std::runtime_error ); + BOOST_CHECK_THROW( ublas::copy( ublas::size(n), n.data(), c.data(), wc.data(), b.data(), p0 ), std::runtime_error ); + BOOST_CHECK_THROW( ublas::copy( ublas::size(n), n.data(), c0, wc.data(), b.data(), wb.data() ), std::runtime_error ); + } +} - ublas::copy( n.size(), n.data(), b.data(), wb.data(), a.data(), wa.data() ); - ublas::copy( n.size(), n.data(), c.data(), wc.data(), b.data(), wb.data() ); +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_algorithms_copy_exceptions, value, test_types2, fixture ) +{ + namespace ublas = boost::numeric::ublas; + using value_type = value; + using vector_t = std::vector; + constexpr auto first_order = ublas::layout::first_order{}; - BOOST_CHECK_NO_THROW( ublas::copy( n.size(), n.data(), c.data(), wc.data(), b.data(), wb.data() ) ); + for(auto const& n : extents) { - } + value_type* a = nullptr; + auto c = vector_t(ublas::product(n)); + auto wa = ublas::to_strides(n,first_order); + auto wc = ublas::to_strides(n,first_order); + BOOST_REQUIRE_THROW( ublas::copy( ublas::size(n), n.data(), c.data(), wc.data(), a, wa.data() ), std::runtime_error ); + } + for(auto const& n : extents) { -} + value_type* a = nullptr; + value_type* c = nullptr; + auto wa = ublas::to_strides(n,first_order); + auto wc = ublas::to_strides(n,first_order); -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_algorithms_copy_exceptions, value, test_types2, fixture ) -{ - using namespace boost::numeric; - using value_type = value; - using vector_type = std::vector; + BOOST_REQUIRE_THROW( ublas::copy( ublas::size(n), n.data(), c, wc.data(), a, wa.data() ), std::runtime_error ); - for(auto const& n : extents) { + } - value_type* a = nullptr; - auto c = vector_type(product(n)); + for(auto const& n : extents) { - auto wa = ublas::strides_t,ublas::layout::first_order>(n); - auto wc = ublas::strides_t,ublas::layout::first_order>(n); + auto a = vector_t(product(n)); + value_type* c = nullptr; - BOOST_REQUIRE_THROW( ublas::copy( n.size(), n.data(), c.data(), wc.data(), a, wa.data() ), std::runtime_error ); - - } + auto wa = ublas::to_strides(n,first_order); + auto wc = ublas::to_strides(n,first_order); - for(auto const& n : extents) { + BOOST_REQUIRE_THROW( ublas::copy( ublas::size(n), n.data(), c, wc.data(), a.data(), wa.data() ), std::runtime_error ); - value_type* a = nullptr; - value_type* c = nullptr; + } - auto wa = ublas::strides_t,ublas::layout::first_order>(n); - auto wc = ublas::strides_t,ublas::layout::first_order>(n); + for(auto const& n : extents) { - BOOST_REQUIRE_THROW( ublas::copy( n.size(), n.data(), c, wc.data(), a, wa.data() ), std::runtime_error ); + auto a = vector_t(product(n)); + auto c = vector_t(product(n)); - } - for(auto const& n : extents) { + size_t* wa = nullptr; + auto wc = ublas::to_strides(n,first_order); - auto a = vector_type(product(n)); 
- value_type* c = nullptr; + BOOST_REQUIRE_THROW( ublas::copy( ublas::size(n), n.data(), c.data(), wc.data(), a.data(), wa ), std::runtime_error ); - auto wa = ublas::strides_t,ublas::layout::first_order>(n); - auto wc = ublas::strides_t,ublas::layout::first_order>(n); + } - BOOST_REQUIRE_THROW( ublas::copy( n.size(), n.data(), c, wc.data(), a.data(), wa.data() ), std::runtime_error ); - - } + for(auto const& n : extents) { - for(auto const& n : extents) { + auto a = vector_t(product(n)); + auto c = vector_t(product(n)); - auto a = vector_type(product(n)); - auto c = vector_type(product(n)); - size_t* wa = nullptr; - auto wc = ublas::strides_t,ublas::layout::first_order>(n); - BOOST_REQUIRE_THROW( ublas::copy( n.size(), n.data(), c.data(), wc.data(), a.data(), wa ), std::runtime_error ); - - } + size_t* wc = nullptr; + auto wa = ublas::to_strides(n,first_order); - for(auto const& n : extents) { + BOOST_REQUIRE_THROW( ublas::copy( ublas::size(n), n.data(), c.data(), wc, a.data(), wa.data() ), std::runtime_error ); - auto a = vector_type(product(n)); - auto c = vector_type(product(n)); + } - size_t* wc = nullptr; - auto wa = ublas::strides_t,ublas::layout::first_order>(n); + for(auto const& n : extents) { - BOOST_REQUIRE_THROW( ublas::copy( n.size(), n.data(), c.data(), wc, a.data(), wa.data() ), std::runtime_error ); - - } + auto a = vector_t(product(n)); + auto c = vector_t(product(n)); - for(auto const& n : extents) { + size_t* m = nullptr; + auto wa = ublas::to_strides(n,first_order); + auto wc = ublas::to_strides(n,first_order); - auto a = vector_type(product(n)); - auto c = vector_type(product(n)); - - size_t* m = nullptr; - auto wc = ublas::strides_t,ublas::layout::first_order>(n); - auto wa = ublas::strides_t,ublas::layout::first_order>(n); + BOOST_REQUIRE_THROW( ublas::copy( ublas::size(n), m, c.data(), wc.data(), a.data(), wa.data() ), std::runtime_error ); - BOOST_REQUIRE_THROW( ublas::copy( n.size(), m, c.data(), wc.data(), a.data(), wa.data() ), std::runtime_error ); - - } + } } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_algorithms_transform, value, test_types2, fixture ) { - using namespace boost::numeric; - using value_type = value; - using vector_type = std::vector; + namespace ublas = boost::numeric::ublas; + using value_type = value; + using vector_t = std::vector; + constexpr auto first_order = ublas::layout::first_order{}; + constexpr auto last_order = ublas::layout::last_order {}; - for(auto const& n : extents) { - auto a = vector_type(product(n)); - auto b = vector_type(product(n)); - auto c = vector_type(product(n)); + for(auto const& n : extents) { - auto wa = ublas::strides_t,ublas::layout::first_order>(n); - auto wb = ublas::strides_t,ublas::layout::last_order> (n); - auto wc = ublas::strides_t,ublas::layout::first_order>(n); + auto a = vector_t(ublas::product(n)); + auto b = vector_t(ublas::product(n)); + auto c = vector_t(ublas::product(n)); - auto v = value_type{}; - for(auto i = 0ul; i < a.size(); ++i, v+=1){ - a[i]=v; - } + auto wa = ublas::to_strides(n,first_order); + auto wb = ublas::to_strides(n,last_order ); + auto wc = ublas::to_strides(n,first_order); - ublas::transform( n.size(), n.data(), b.data(), wb.data(), a.data(), wa.data(), [](value_type const& a){ return a + value_type(1);} ); - ublas::transform( n.size(), n.data(), c.data(), wc.data(), b.data(), wb.data(), [](value_type const& a){ return a - value_type(1);} ); + auto v = value_type{}; + for(auto i = 0ul; i < a.size(); ++i, v+=1){ + a[i]=v; + } - using size_type = typename 
ublas::strides_t,ublas::layout::first_order>::value_type; + ublas::transform( ublas::size(n), n.data(), b.data(), wb.data(), a.data(), wa.data(), [](value_type const& a){ return a + value_type(1);} ); + ublas::transform( ublas::size(n), n.data(), c.data(), wc.data(), b.data(), wb.data(), [](value_type const& a){ return a - value_type(1);} ); - size_type zero = 0; - ublas::transform(zero, n.data(), c.data(), wc.data(), b.data(), wb.data(), [](value_type const& a){ return a + value_type(1);} ); + auto zero = std::size_t{0}; + ublas::transform(zero, n.data(), c.data(), wc.data(), b.data(), wb.data(), [](value_type const& a){ return a + value_type(1);} ); - value_type* c0 = nullptr; - const size_type* s0 = nullptr; - size_type const*const p0 = nullptr; + value_type* c0 = nullptr; + const std::size_t* s0 = nullptr; + std::size_t const*const p0 = nullptr; - BOOST_CHECK_THROW(ublas::transform( n.size(), n.data(), c0, wb.data(), a.data(), wa.data(), [](value_type const& a){ return a + value_type(1);} ), std::runtime_error); - BOOST_CHECK_THROW(ublas::transform( n.size(), n.data(), b.data(), s0, a.data(), wa.data(), [](value_type const& a){ return a + value_type(1);} ), std::runtime_error); - BOOST_CHECK_THROW(ublas::transform( n.size(), p0, b.data(), wb.data(), a.data(), wa.data(), [](value_type const& a){ return a + value_type(1);} ), std::runtime_error); + BOOST_CHECK_THROW(ublas::transform( ublas::size(n), n.data(), c0, wb.data(), a.data(), wa.data(), [](value_type const& a){ return a + value_type(1);} ), std::runtime_error); + BOOST_CHECK_THROW(ublas::transform( ublas::size(n), n.data(), b.data(), s0, a.data(), wa.data(), [](value_type const& a){ return a + value_type(1);} ), std::runtime_error); + BOOST_CHECK_THROW(ublas::transform( ublas::size(n), p0, b.data(), wb.data(), a.data(), wa.data(), [](value_type const& a){ return a + value_type(1);} ), std::runtime_error); - for(auto i = 1ul; i < c.size(); ++i) - BOOST_CHECK_EQUAL( c[i], a[i] ); + for(auto i = 1ul; i < c.size(); ++i) + BOOST_CHECK_EQUAL( c[i], a[i] ); - } + } } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_algorithms_transform_exceptions, value, test_types2, fixture ) { - using namespace boost::numeric; - using value_type = value; - using vector_type = std::vector; - - for(auto const& n : extents) { - - value_type* a = nullptr; - auto c = vector_type(product(n)); - - auto wa = ublas::strides_t,ublas::layout::first_order>(n); - auto wc = ublas::strides_t,ublas::layout::first_order>(n); - - BOOST_REQUIRE_THROW( ublas::transform( n.size(), n.data(), c.data(), wc.data(), a, wa.data(), [](value_type const& a){ return a + value_type(1);} ), std::runtime_error ); - - } - - for(auto const& n : extents) { - - value_type* a = nullptr; - value_type* c = nullptr; - - auto wa = ublas::strides_t,ublas::layout::first_order>(n); - auto wc = ublas::strides_t,ublas::layout::first_order>(n); - - BOOST_REQUIRE_THROW( ublas::transform( n.size(), n.data(), c, wc.data(), a, wa.data(), [](value_type const& a){ return a + value_type(1);} ), std::runtime_error ); - - } + namespace ublas = boost::numeric::ublas; + using value_type = value; + using vector_t = std::vector; - for(auto const& n : extents) { + constexpr auto first_order = ublas::layout::first_order{}; - auto a = vector_type(product(n)); - value_type* c = nullptr; + for(auto const& n : extents) { - auto wa = ublas::strides_t,ublas::layout::first_order>(n); - auto wc = ublas::strides_t,ublas::layout::first_order>(n); + value_type* a = nullptr; + auto c = vector_t(ublas::product(n)); - 
BOOST_REQUIRE_THROW( ublas::transform( n.size(), n.data(), c, wc.data(), a.data(), wa.data(), [](value_type const& a){ return a + value_type(1);} ), std::runtime_error ); - - } - - for(auto const& n : extents) { + auto wa = ublas::to_strides(n,first_order); + auto wc = ublas::to_strides(n,first_order); - auto a = vector_type(product(n)); - auto c = vector_type(product(n)); + BOOST_REQUIRE_THROW( ublas::transform( ublas::size(n), n.data(), c.data(), wc.data(), a, wa.data(), [](value_type const& a){ return a + value_type(1);} ), std::runtime_error ); - size_t* wa = nullptr; - auto wc = ublas::strides_t,ublas::layout::first_order>(n); + } - BOOST_REQUIRE_THROW( ublas::transform( n.size(), n.data(), c.data(), wc.data(), a.data(), wa, [](value_type const& a){ return a + value_type(1);} ), std::runtime_error ); - - } + for(auto const& n : extents) { - for(auto const& n : extents) { + value_type* a = nullptr; + value_type* c = nullptr; - auto a = vector_type(product(n)); - auto c = vector_type(product(n)); + auto wa = ublas::to_strides(n,first_order); + auto wc = ublas::to_strides(n,first_order); - size_t* wc = nullptr; - auto wa = ublas::strides_t,ublas::layout::first_order>(n); + BOOST_REQUIRE_THROW( ublas::transform( ublas::size(n), n.data(), c, wc.data(), a, wa.data(), [](value_type const& a){ return a + value_type(1);} ), std::runtime_error ); - BOOST_REQUIRE_THROW( ublas::transform( n.size(), n.data(), c.data(), wc, a.data(), wa.data(), [](value_type const& a){ return a + value_type(1);} ), std::runtime_error ); - - } + } - for(auto const& n : extents) { + for(auto const& n : extents) { - auto a = vector_type(product(n)); - auto c = vector_type(product(n)); - - size_t* m = nullptr; - auto wc = ublas::strides_t,ublas::layout::first_order>(n); - auto wa = ublas::strides_t,ublas::layout::first_order>(n); + auto a = vector_t(ublas::product(n)); + value_type* c = nullptr; - BOOST_REQUIRE_THROW( ublas::transform( n.size(), m, c.data(), wc.data(), a.data(), wa.data(), [](value_type const& a){ return a + value_type(1);} ), std::runtime_error ); - - } -} + auto wa = ublas::to_strides(n,first_order); + auto wc = ublas::to_strides(n,first_order); -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_algorithms_accumulate, value, test_types2, fixture ) -{ - using namespace boost::numeric; - using value_type = value; - using vector_type = std::vector; + BOOST_REQUIRE_THROW( ublas::transform( ublas::size(n), n.data(), c, wc.data(), a.data(), wa.data(), [](value_type const& a){ return a + value_type(1);} ), std::runtime_error ); + } - for(auto const& n : extents) { + for(auto const& n : extents) { - auto const s = product(n); + auto a = vector_t(product(n)); + auto c = vector_t(product(n)); - auto a = vector_type(product(n)); - // auto b = vector_type(product(n)); - // auto c = vector_type(product(n)); + size_t* wa = nullptr; + auto wc = ublas::to_strides(n,first_order); - auto wa = ublas::strides_t,ublas::layout::first_order>(n); - // auto wb = ublas::strides_t,ublas::layout::last_order> (n); - // auto wc = ublas::strides_t,ublas::layout::first_order>(n); + BOOST_REQUIRE_THROW( ublas::transform( ublas::size(n), n.data(), c.data(), wc.data(), a.data(), wa, [](value_type const& a){ return a + value_type(1);} ), std::runtime_error ); - auto v = value_type{}; - for(auto i = 0ul; i < a.size(); ++i, v+=value_type(1)){ - a[i]=v; - } + } - auto acc = ublas::accumulate( n.size(), n.data(), a.data(), wa.data(), v); + for(auto const& n : extents) { - BOOST_CHECK_EQUAL( acc, value_type( static_cast< inner_type_t >( s*(s+1) 
/ 2 ) ) ); + auto a = vector_t(ublas::product(n)); + auto c = vector_t(ublas::product(n)); - using size_type = typename ublas::strides_t,ublas::layout::first_order>::value_type; - size_type zero = 0; - (void)ublas::accumulate(zero, n.data(), a.data(), wa.data(),v); + size_t* wc = nullptr; + auto wa = ublas::to_strides(n,first_order); - value_type* c0 = nullptr; - size_type const*const p0 = nullptr; + BOOST_REQUIRE_THROW( ublas::transform( ublas::size(n), n.data(), c.data(), wc, a.data(), wa.data(), [](value_type const& a){ return a + value_type(1);} ), std::runtime_error ); - BOOST_CHECK_THROW((void)ublas::accumulate( n.size(), n.data(), c0, wa.data(), v), std::runtime_error); - BOOST_CHECK_THROW((void)ublas::accumulate( n.size(), n.data(), a.data(), p0, v), std::runtime_error); - BOOST_CHECK_THROW((void)ublas::accumulate( n.size(), p0, a.data(), wa.data(), v), std::runtime_error); + } + for(auto const& n : extents) { - auto acc2 = ublas::accumulate( n.size(), n.data(), a.data(), wa.data(), v, - [](auto const& l, auto const& r){return l + r; }); + auto a = vector_t(product(n)); + auto c = vector_t(product(n)); - BOOST_CHECK_EQUAL( acc2, value_type( static_cast< inner_type_t >( s*(s+1) / 2 ) ) ); + size_t* m = nullptr; + auto wa = ublas::to_strides(n,first_order); + auto wc = ublas::to_strides(n,first_order); - (void)ublas::accumulate(zero, n.data(), a.data(), wa.data(), v, [](auto const& l, auto const& r){return l + r; }); + BOOST_REQUIRE_THROW( ublas::transform( ublas::size(n), m, c.data(), wc.data(), a.data(), wa.data(), [](value_type const& a){ return a + value_type(1);} ), std::runtime_error ); - BOOST_CHECK_THROW((void)ublas::accumulate( n.size(), n.data(), c0, wa.data(), v,[](auto const& l, auto const& r){return l + r; }), std::runtime_error); - BOOST_CHECK_THROW((void)ublas::accumulate( n.size(), n.data(), a.data(), p0, v, [](auto const& l, auto const& r){return l + r; }), std::runtime_error); - BOOST_CHECK_THROW((void)ublas::accumulate( n.size(), p0, a.data(), wa.data(),v, [](auto const& l, auto const& r){return l + r; }), std::runtime_error); - - } + } } - -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_algorithms_accumulate_exceptions, value, test_types2, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_algorithms_accumulate, value, test_types2, fixture ) { - using namespace boost::numeric; - using value_type = value; - using vector_type = std::vector; - - for(auto const& n : extents) { + namespace ublas = boost::numeric::ublas; + using value_type = value; + using vector_t = std::vector; - value_type* a = nullptr; + constexpr auto first_order = ublas::layout::first_order{}; - auto wa = ublas::strides_t,ublas::layout::first_order>(n); - BOOST_REQUIRE_THROW( (void)ublas::accumulate( n.size(), n.data(), a, wa.data(), value_type{0} ), std::runtime_error ); - - } + for(auto const& n : extents) { - for(auto const& n : extents) { + auto const s = ublas::product(n); - value_type* a = nullptr; + auto a = vector_t(ublas::product(n)); + auto wa = ublas::to_strides(n,first_order); - auto wa = ublas::strides_t,ublas::layout::first_order>(n); - BOOST_REQUIRE_THROW( (void)ublas::accumulate( n.size(), n.data(), a, wa.data(), value_type{0},[](value_type const& a,value_type const& b){ return a + b;} ), std::runtime_error ); - + auto v = value_type{}; + for(auto i = 0ul; i < a.size(); ++i, v+=value_type(1)){ + a[i]=v; } - for(auto const& n : extents) { + auto acc = ublas::accumulate( ublas::size(n), n.data(), a.data(), wa.data(), v); - auto a = vector_type(product(n)); + auto sum = 
std::div(s*(s+1),2).quot; - auto wa = ublas::strides_t,ublas::layout::first_order>(n); - size_t p = 0u; - BOOST_CHECK_EQUAL ( ublas::accumulate( p, n.data(), a.data(), wa.data(), value_type{0} ), value_type{0} ); - - } - - for(auto const& n : extents) { + BOOST_CHECK_EQUAL( acc, value_type( static_cast< inner_type_t >( sum ) ) ); - auto a = vector_type(product(n)); + auto zero = std::size_t{0}; + (void)ublas::accumulate(zero, n.data(), a.data(), wa.data(),v); - auto wa = ublas::strides_t,ublas::layout::first_order>(n); - size_t p = 0u; - BOOST_CHECK_EQUAL( ublas::accumulate( p, n.data(), a.data(), wa.data(), value_type{0}, [](value_type const& a,value_type const& b){ return a + b;} ), value_type{0} ); - - } + value_type* c0 = nullptr; + std::size_t const*const p0 = nullptr; - for(auto const& n : extents) { + BOOST_CHECK_THROW((void)ublas::accumulate( ublas::size(n), n.data(), c0, wa.data(), v), std::runtime_error); + BOOST_CHECK_THROW((void)ublas::accumulate( ublas::size(n), n.data(), a.data(), p0, v), std::runtime_error); + BOOST_CHECK_THROW((void)ublas::accumulate( ublas::size(n), p0, a.data(), wa.data(), v), std::runtime_error); - auto a = vector_type(product(n)); - size_t* wa = nullptr; + auto acc2 = ublas::accumulate( ublas::size(n), n.data(), a.data(), wa.data(), v, + [](auto const& l, auto const& r){return l + r; }); - BOOST_REQUIRE_THROW( (void)ublas::accumulate( n.size(), n.data(), a.data(), wa, value_type{0} ), std::runtime_error ); - - } + BOOST_CHECK_EQUAL( acc2, value_type( static_cast< inner_type_t >( sum ) ) ); - for(auto const& n : extents) { + (void)ublas::accumulate(zero, n.data(), a.data(), wa.data(), v, [](auto const& l, auto const& r){return l + r; }); - auto a = vector_type(product(n)); + BOOST_CHECK_THROW((void)ublas::accumulate( ublas::size(n), n.data(), c0, wa.data(), v,[](auto const& l, auto const& r){return l + r; }), std::runtime_error); + BOOST_CHECK_THROW((void)ublas::accumulate( ublas::size(n), n.data(), a.data(), p0, v, [](auto const& l, auto const& r){return l + r; }), std::runtime_error); + BOOST_CHECK_THROW((void)ublas::accumulate( ublas::size(n), p0, a.data(), wa.data(),v, [](auto const& l, auto const& r){return l + r; }), std::runtime_error); - auto wa = ublas::strides_t,ublas::layout::first_order>(n); + } +} - size_t* m = nullptr; - BOOST_REQUIRE_THROW( (void)ublas::accumulate( n.size(), m, a.data(), wa.data(), value_type{0}, [](value_type const& a,value_type const& b){ return a + b;} ), std::runtime_error ); - - } +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_algorithms_accumulate_exceptions, value, test_types2, fixture ) +{ + namespace ublas = boost::numeric::ublas; + using value_type = value; + using vector_t = std::vector; + constexpr auto first_order = ublas::layout::first_order{}; + + + for(auto const& n : extents) { + value_type* a = nullptr; + auto wa = ublas::to_strides(n,first_order); + BOOST_REQUIRE_THROW( (void)ublas::accumulate( ublas::size(n), n.data(), a, wa.data(), value_type{0} ), std::runtime_error ); + + } + + for(auto const& n : extents) { + value_type* a = nullptr; + auto wa = ublas::to_strides(n,first_order); + BOOST_REQUIRE_THROW( (void)ublas::accumulate( ublas::size(n), n.data(), a, wa.data(), value_type{0},[](value_type const& a,value_type const& b){ return a + b;} ), std::runtime_error ); + } + + for(auto const& n : extents) { + auto a = vector_t(product(n)); + auto wa = ublas::to_strides(n,first_order); + size_t p = 0u; + BOOST_CHECK_EQUAL ( ublas::accumulate( p, n.data(), a.data(), wa.data(), value_type{0} ), value_type{0} 
); + } + + for(auto const& n : extents) { + auto a = vector_t(product(n)); + auto wa = ublas::to_strides(n,first_order); + size_t p = 0u; + BOOST_CHECK_EQUAL( ublas::accumulate( p, n.data(), a.data(), wa.data(), value_type{0}, [](value_type const& a,value_type const& b){ return a + b;} ), value_type{0} ); + } + + for(auto const& n : extents) { + auto a = vector_t(product(n)); + size_t* wa = nullptr; + BOOST_REQUIRE_THROW( (void)ublas::accumulate( ublas::size(n), n.data(), a.data(), wa, value_type{0} ), std::runtime_error ); + } + + for(auto const& n : extents) { + auto a = vector_t(product(n)); + auto wa = ublas::to_strides(n,first_order); + size_t* m = nullptr; + BOOST_REQUIRE_THROW( (void)ublas::accumulate( ublas::size(n), m, a.data(), wa.data(), value_type{0}, [](value_type const& a,value_type const& b){ return a + b;} ), std::runtime_error ); + } } @@ -460,282 +418,146 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_algorithms_accumulate_exceptions, template void init(std::vector& a) { - auto v = V(1); - for(auto i = 0u; i < a.size(); ++i, ++v){ - a[i] = v; - } + auto v = V(1); + for(auto i = 0u; i < a.size(); ++i, ++v){ + a[i] = v; + } } template void init(std::vector>& a) { - auto v = std::complex(1,1); - for(auto i = 0u; i < a.size(); ++i){ - a[i] = v; - v.real(v.real()+1); - v.imag(v.imag()+1); - } + auto v = std::complex(1,1); + for(auto i = 0u; i < a.size(); ++i){ + a[i] = v; + v.real(v.real()+1); + v.imag(v.imag()+1); + } } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_algorithms_trans, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using vector_type = std::vector; - using extents_type = ublas::extents<>; - using strides_type = ublas::strides_t; - using size_type = typename extents_type::value_type; - using permutation_type = std::vector; + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; +// using layout_t = typename value::second_type; + using vector_t = std::vector; + using base_t = typename extents_t::base_type; + using permutation_type = std::vector; + constexpr auto first_order = ublas::layout::first_order{}; - for(auto const& n : extents) { - auto p = n.size(); - auto s = product(n); + for(auto const& n : extents) { - auto pi = permutation_type(p); - auto a = vector_type(s); - auto b1 = vector_type(s); - auto b2 = vector_type(s); - auto c1 = vector_type(s); - auto c2 = vector_type(s); + auto p = ublas::size(n); + auto s = ublas::product(n); - auto wa = strides_type(n); + auto pi = permutation_type(p); + auto a = vector_t(s); + auto b1 = vector_t(s); + auto b2 = vector_t(s); + auto c1 = vector_t(s); + auto c2 = vector_t(s); - init(a); + auto wa = ublas::to_strides(n,first_order); - // so wie last-order. - for(auto i = size_type(0), j = p; i < n.size(); ++i, --j) - pi[i] = j; + init(a); - auto nc = typename extents_type::base_type (p); - for(auto i = 0u; i < p; ++i) - nc[pi[i]-1] = n[i]; + // so wie last-order. 
+ for(auto i = std::size_t{0}, j = p; i < ublas::size(n); ++i, --j) + pi[i] = j; - auto wc = strides_type(extents_type(nc)); - auto wc_pi = typename strides_type::base_type (p); - for(auto i = 0u; i < p; ++i) - wc_pi[pi[i]-1] = wc[i]; + auto nc_base = base_t(p); + for(auto i = 0u; i < p; ++i) + nc_base[pi[i]-1] = n[i]; - ublas::copy ( p, n.data(), c1.data(), wc_pi.data(), a.data(), wa.data()); - ublas::trans( p, n.data(), pi.data(), c2.data(), wc.data(), a.data(), wa.data() ); + auto nc = extents_t(std::move(nc_base)); - if(!std::is_compound_v) - for(auto i = 0ul; i < s; ++i) - BOOST_CHECK_EQUAL( c1[i], c2[i] ); + auto wc = ublas::to_strides(nc,first_order); + auto wc_pi = base_t(p); + for(auto i = 0u; i < p; ++i) + wc_pi[pi[i]-1] = wc[i]; + ublas::copy ( p, n.data(), c1.data(), wc_pi.data(), a.data(), wa.data()); + ublas::trans( p, n.data(), pi.data(), c2.data(), wc.data(), a.data(), wa.data() ); - auto nb = typename extents_type::base_type (p); - for(auto i = 0u; i < p; ++i) - nb[pi[i]-1] = nc[i]; + if(!std::is_compound_v) + for(auto i = 0ul; i < s; ++i) + BOOST_CHECK_EQUAL( c1[i], c2[i] ); - auto wb = strides_type (extents_type(nb)); - auto wb_pi = typename strides_type::base_type (p); - for(auto i = 0u; i < p; ++i) - wb_pi[pi[i]-1] = wb[i]; - ublas::copy ( p, nc.data(), b1.data(), wb_pi.data(), c1.data(), wc.data()); - ublas::trans( p, nc.data(), pi.data(), b2.data(), wb.data(), c2.data(), wc.data() ); + auto nb_base = base_t(p); + for(auto i = 0u; i < p; ++i) + nb_base[pi[i]-1] = nc[i]; - if(!std::is_compound_v) - for(auto i = 0ul; i < s; ++i) - BOOST_CHECK_EQUAL( b1[i], b2[i] ); + auto nb = extents_t(std::move(nb_base)); - for(auto i = 0ul; i < s; ++i) - BOOST_CHECK_EQUAL( a[i], b2[i] ); + auto wb = ublas::to_strides(nb,first_order); + auto wb_pi = base_t(p); + for(auto i = 0u; i < p; ++i) + wb_pi[pi[i]-1] = wb[i]; - size_type zero = 0; - ublas::trans( zero, n.data(), pi.data(), c2.data(), wc.data(), a.data(), wa.data() ); - ublas::trans( zero, nc.data(), pi.data(), b2.data(), wb.data(), c2.data(), wc.data() ); + ublas::copy ( p, nc.data(), b1.data(), wb_pi.data(), c1.data(), wc.data()); + ublas::trans( p, nc.data(), pi.data(), b2.data(), wb.data(), c2.data(), wc.data() ); - value_type *c0 = nullptr; - size_type const*const s0 = nullptr; + if(!std::is_compound_v) + for(auto i = 0ul; i < s; ++i) + BOOST_CHECK_EQUAL( b1[i], b2[i] ); - BOOST_CHECK_THROW(ublas::trans( p, n.data(), pi.data(), c0, wc.data(), a.data(), wa.data()), std::runtime_error); - BOOST_CHECK_THROW(ublas::trans( p, s0, pi.data(), c2.data(),wc.data(), a.data(), wa.data()), std::runtime_error); - BOOST_CHECK_THROW(ublas::trans( p, n.data(), pi.data(), c2.data(), s0, a.data(), wa.data()), std::runtime_error); - BOOST_CHECK_THROW(ublas::trans( p, n.data(), s0, c2.data(), wc.data(), a.data(), wa.data()), std::runtime_error); + for(auto i = 0ul; i < s; ++i) + BOOST_CHECK_EQUAL( a[i], b2[i] ); - } -} + auto zero = std::size_t{0}; + ublas::trans( zero, n.data(), pi.data(), c2.data(), wc.data(), a.data(), wa.data() ); + ublas::trans( zero, nc.data(), pi.data(), b2.data(), wb.data(), c2.data(), wc.data() ); + value_type *c0 = nullptr; + std::size_t const*const s0 = nullptr; -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_algorithms_trans_exceptions, value, test_types, fixture ) -{ - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using vector_type = std::vector; - using extents_type = ublas::extents<>; - using strides_type = ublas::strides_t; - 
using size_type = typename extents_type::value_type; - using permutation_type = std::vector; - - for(auto const& n : extents) { + BOOST_CHECK_THROW(ublas::trans( p, n.data(), pi.data(), c0, wc.data(), a.data(), wa.data()), std::runtime_error); + BOOST_CHECK_THROW(ublas::trans( p, s0, pi.data(), c2.data(),wc.data(), a.data(), wa.data()), std::runtime_error); + BOOST_CHECK_THROW(ublas::trans( p, n.data(), pi.data(), c2.data(), s0, a.data(), wa.data()), std::runtime_error); + BOOST_CHECK_THROW(ublas::trans( p, n.data(), s0, c2.data(), wc.data(), a.data(), wa.data()), std::runtime_error); - auto p = n.size(); - auto s = product(n); - - auto pi = permutation_type(p); - value_type* a = nullptr; - auto c = vector_type(s); - - auto wa = strides_type(n); - - auto nc = typename extents_type::base_type (p); - auto wc = strides_type(n); - auto wc_pi = typename strides_type::base_type (p); - - BOOST_REQUIRE_THROW( ublas::trans( p, nc.data(), pi.data(), a, wa.data(), c.data(), wc.data() ), std::runtime_error ); - - } - - for(auto const& n : extents) { - - auto p = n.size(); - auto s = product(n); - - auto pi = permutation_type(p); - value_type* a = nullptr; - auto c = vector_type(s); - - auto wa = strides_type(n); - auto nc = typename extents_type::base_type (p); - - auto wc = strides_type(n); - - BOOST_REQUIRE_THROW( ublas::trans( p, nc.data(), pi.data(), c.data(), wc.data(), a, wa.data() ), std::runtime_error ); - - } - - for(auto const& n : extents) { - - auto p = n.size(); - - auto pi = permutation_type(p); - value_type* a = nullptr; - value_type* c = nullptr; - - auto wa = strides_type(n); - auto nc = typename extents_type::base_type (p); - - auto wc = strides_type(n); - - BOOST_REQUIRE_THROW( ublas::trans( p, nc.data(), pi.data(), c, wc.data(), a, wa.data() ), std::runtime_error ); - - } - - for(auto const& n : extents) { - - auto p = n.size(); - auto s = product(n); - - auto pi = permutation_type(p); - auto a = vector_type(s); - auto c = vector_type(s); - - auto wa = strides_type(n); - - auto nc = typename extents_type::base_type (p); - - size_t* wc = nullptr; - - BOOST_REQUIRE_THROW( ublas::trans( p, nc.data(), pi.data(), c.data(), wc, a.data(), wa.data() ), std::runtime_error ); - - } - - for(auto const& n : extents) { - - auto p = n.size(); - auto s = product(n); - - auto pi = permutation_type(p); - auto a = vector_type(s); - auto c = vector_type(s); - - auto wc = strides_type(n); - auto nc = typename extents_type::base_type (p); - - size_t* wa = nullptr; - - BOOST_REQUIRE_THROW( ublas::trans( p, nc.data(), pi.data(), c.data(), wc.data(), a.data(), wa ), std::runtime_error ); - - } - - for(auto const& n : extents) { - - auto p = n.size(); - auto s = product(n); - - auto pi = permutation_type(p); - auto a = vector_type(s); - auto c = vector_type(s); - - size_t* wc = nullptr; - - auto nc = typename extents_type::base_type (p); - - size_t* wa = nullptr; - - BOOST_REQUIRE_THROW( ublas::trans( p, nc.data(), pi.data(), c.data(), wc, a.data(), wa ), std::runtime_error ); - - } - - for(auto const& n : extents) { - - auto p = n.size(); - auto s = product(n); - - size_type* pi = nullptr; - auto a = vector_type(s); - auto c = vector_type(s); - - auto wa = strides_type(n); - - auto nc = typename extents_type::base_type (p); - auto wc = strides_type(n); - - BOOST_REQUIRE_THROW( ublas::trans( p, nc.data(), pi, c.data(), wc.data(), a.data(), wa.data() ), std::runtime_error ); - - } - - for(auto const& n : extents) { - - auto p = n.size(); - auto s = product(n); - - auto pi = permutation_type(p); - auto a = 
vector_type(s); - auto c = vector_type(s); - - auto wa = strides_type(n); - size_t* nc = nullptr; - - auto wc = strides_type(n); - - BOOST_REQUIRE_THROW( ublas::trans( p, nc, pi.data(), c.data(), wc.data(), a.data(), wa.data() ), std::runtime_error ); - - } - - for(auto const& n : extents) { - - size_type p = 1; - auto s = product(n); - - auto pi = permutation_type(p); - auto a = vector_type(s); - auto c = vector_type(s); + } +} - auto wa = strides_type(n); - auto nc = typename extents_type::base_type (p); - - auto wc = strides_type(n); - ublas::trans( p, nc.data(), pi.data(), c.data(), wc.data(), a.data(), wa.data() ); - - } +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_algorithms_trans_exceptions, value, test_types, fixture ) +{ + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_t = typename value::second_type; + using vector_t = std::vector; + using permutation_type = std::vector; + + constexpr auto layout = layout_t{}; + + std::size_t* nnullptr = nullptr; + value_type * anullptr = nullptr; + + for(auto const& n : extents) { + auto p = ublas::size(n); + auto s = ublas::product(n); + auto pi = permutation_type(p); + auto a = vector_t(s); + auto c = vector_t(s); + auto wa = ublas::to_strides(n,layout); + auto wc = ublas::to_strides(n,layout); + if(p>1){ + BOOST_REQUIRE_THROW( ublas::trans( p, nnullptr, pi.data(), c.data(), wc.data(), a.data(), wa.data() ), std::runtime_error ); + BOOST_REQUIRE_THROW( ublas::trans( p, n.data() , nnullptr , c.data(), wc.data(), a.data(), wa.data() ), std::runtime_error ); + BOOST_REQUIRE_THROW( ublas::trans( p, n.data() , pi.data(), c.data(), nnullptr , a.data(), nnullptr ), std::runtime_error ); + BOOST_REQUIRE_THROW( ublas::trans( p, n.data() , pi.data(), c.data(), wc.data(), a.data(), nnullptr ), std::runtime_error ); + BOOST_REQUIRE_THROW( ublas::trans( p, n.data() , pi.data(), c.data(), nnullptr , a.data(), wa.data() ), std::runtime_error ); + BOOST_REQUIRE_THROW( ublas::trans( p, n.data() , pi.data(), anullptr, wc.data(), anullptr, wa.data() ), std::runtime_error ); + BOOST_REQUIRE_THROW( ublas::trans( p, n.data() , pi.data(), c.data(), wc.data(), anullptr, wa.data() ), std::runtime_error ); + BOOST_REQUIRE_THROW( ublas::trans( p, n.data() , pi.data(), anullptr, wc.data(), a.data(), wa.data() ), std::runtime_error ); + } + + // ublas::trans( p, n.data(), pi.data(), c.data(), wc.data(), a.data(), wa.data() ); + } } diff --git a/test/tensor/test_einstein_notation.cpp b/test/tensor/test_einstein_notation.cpp index 0abda5b96..400011a7c 100644 --- a/test/tensor/test_einstein_notation.cpp +++ b/test/tensor/test_einstein_notation.cpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at @@ -12,15 +12,18 @@ // And we acknowledge the support from all contributors. 
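The rewritten algorithm tests above call the pointer-based kernels through the free functions ublas::size, ublas::product and ublas::to_strides instead of the old stride classes, and expect std::runtime_error for null pointers. A minimal, self-contained usage sketch of that interface follows; shapes, fill values and variable names are illustrative and not taken from the patch, and the umbrella tensor header is assumed to pull in the algorithm overloads.

#include <boost/numeric/ublas/tensor.hpp>
#include <cstddef>
#include <vector>

int main()
{
  namespace ublas = boost::numeric::ublas;
  constexpr auto first_order = ublas::layout::first_order{};

  auto n  = ublas::extents<>{4,2,3};             // dynamic extents
  auto wa = ublas::to_strides(n, first_order);   // strides for this layout
  auto a  = std::vector<float>(ublas::product(n), 1.0f);
  auto c  = std::vector<float>(ublas::product(n));

  // elementwise c = a + 1 over the index space described by n and the strides
  ublas::transform(ublas::size(n), n.data(), c.data(), wa.data(),
                   a.data(), wa.data(),
                   [](float v){ return v + 1.0f; });

  // reduce all elements of a; a binary-op overload exists as well
  auto sum = ublas::accumulate(ublas::size(n), n.data(), a.data(), wa.data(), 0.0f);

  // permuted copy with a one-based mode permutation pi (here: reverse the modes)
  auto pi = std::vector<std::size_t>{3,2,1};
  auto nc = ublas::extents<>{3,2,4};             // nc[pi[i]-1] = n[i]
  auto wc = ublas::to_strides(nc, first_order);
  ublas::trans(ublas::size(n), n.data(), pi.data(),
               c.data(), wc.data(), a.data(), wa.data());

  return sum == float(ublas::product(n)) ? 0 : 1;
}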
-#include -#include #include - #include + +#include +#include + + #include "utility.hpp" -BOOST_AUTO_TEST_SUITE ( test_einstein_notation, * boost::unit_test::depends_on("test_multi_index") ) +BOOST_AUTO_TEST_SUITE ( test_einstein_notation/*, + *boost::unit_test::depends_on("test_multi_index") */) using test_types = zip>::with_t; @@ -29,94 +32,115 @@ using test_types = zip>::with_t; - using namespace boost::numeric::ublas::index; - - { - auto A = tensor_type{5,3}; - auto B = tensor_type{3,4}; - // auto C = tensor_type{4,5,6}; - - for(auto j = 0u; j < A.extents().at(1); ++j) - for(auto i = 0u; i < A.extents().at(0); ++i) - A.at( i,j ) = value_type( static_cast< inner_type_t >(i+1) ); - - for(auto j = 0u; j < B.extents().at(1); ++j) - for(auto i = 0u; i < B.extents().at(0); ++i) - B.at( i,j ) = value_type( static_cast< inner_type_t >(i+1) ); - - - - auto AB = A(_,_e) * B(_e,_); + namespace ublas = boost::numeric::ublas; + + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + using tensor_t = ublas::tensor_dynamic; + // NOLINTNEXTLINE(google-build-using-namespace) + using namespace boost::numeric::ublas::index; + + { + auto A = tensor_t(5,3); + auto B = tensor_t{3,4}; + // auto C = tensor_t{4,5,6}; + + for(auto j = 0u; j < A.extents().at(1); ++j){ + for(auto i = 0u; i < A.extents().at(0); ++i){ + A.at( i,j ) = value_t( static_cast< inner_type_t >(i+1) ); + } + } - // std::cout << "A = " << A << std::endl; - // std::cout << "B = " << B << std::endl; - // std::cout << "AB = " << AB << std::endl; + for(auto j = 0u; j < B.extents().at(1); ++j){ + for(auto i = 0u; i < B.extents().at(0); ++i){ + B.at( i,j ) = value_t( static_cast< inner_type_t >(i+1) ); + } + } - for(auto j = 0u; j < AB.extents().at(1); ++j) - for(auto i = 0u; i < AB.extents().at(0); ++i) - BOOST_CHECK_EQUAL( AB.at( i,j ) , value_type(A.at( i,0 ) * ( B.extents().at(0) * (B.extents().at(0)+1) / 2 )) ); + auto AB = A(_,_e) * B(_e,_); + // std::cout << "A = " << A << std::endl; + // std::cout << "B = " << B << std::endl; + // std::cout << "AB = " << AB << std::endl; + for(auto j = 0u; j < AB.extents().at(1); ++j){ + for(auto i = 0u; i < AB.extents().at(0); ++i){ + auto e0 = B.extents().at(0); + auto sum = std::div(e0*(e0+1),2); + auto quot = value_t(sum.quot); + BOOST_CHECK_EQUAL( AB.at(i,j) , A.at(i,0)*quot ); + } } + } - { - auto A = tensor_type{4,5,3}; - auto B = tensor_type{3,4,2}; - - for(auto k = 0u; k < A.extents().at(2); ++k) - for(auto j = 0u; j < A.extents().at(1); ++j) - for(auto i = 0u; i < A.extents().at(0); ++i) - A.at( i,j,k ) = value_type( static_cast< inner_type_t >(i+1) ); + { + auto A = tensor_t{4,5,3}; + auto B = tensor_t{3,4,2}; - for(auto k = 0u; k < B.extents().at(2); ++k) - for(auto j = 0u; j < B.extents().at(1); ++j) - for(auto i = 0u; i < B.extents().at(0); ++i) - B.at( i,j,k ) = value_type( static_cast< inner_type_t >(i+1) ); + for(auto k = 0u; k < A.extents().at(2); ++k){ + for(auto j = 0u; j < A.extents().at(1); ++j){ + for(auto i = 0u; i < A.extents().at(0); ++i){ + A.at( i,j,k ) = value_t( static_cast< inner_type_t >(i+1) ); + } + } + } - auto AB = A(_d,_,_f) * B(_f,_d,_); + for(auto k = 0u; k < B.extents().at(2); ++k){ + for(auto j = 0u; j < B.extents().at(1); ++j){ + for(auto i = 0u; i < B.extents().at(0); ++i){ + B.at( i,j,k ) = value_t( static_cast< inner_type_t >(i+1) ); + } + } + } - // std::cout << "A = " << A << std::endl; - // std::cout << "B = " << B << std::endl; - // std::cout << "AB = " << AB << std::endl; - // n*(n+1)/2; - auto const nf = ( 
B.extents().at(0) * (B.extents().at(0)+1) / 2 ); - auto const nd = ( A.extents().at(0) * (A.extents().at(0)+1) / 2 ); + auto AB = A(_d,_,_f) * B(_f,_d,_); - for(auto j = 0u; j < AB.extents().at(1); ++j) - for(auto i = 0u; i < AB.extents().at(0); ++i) - BOOST_CHECK_EQUAL( AB.at( i,j ) , value_type( static_cast< inner_type_t >(nf * nd) ) ); + // std::cout << "A = " << A << std::endl; + // std::cout << "B = " << B << std::endl; + // std::cout << "AB = " << AB << std::endl; + // n*(n+1)/2; + auto const nf = ( B.extents().at(0) * (B.extents().at(0)+1) / 2 ); + auto const nd = ( A.extents().at(0) * (A.extents().at(0)+1) / 2 ); + for(auto j = 0u; j < AB.extents().at(1); ++j){ + for(auto i = 0u; i < AB.extents().at(0); ++i){ + BOOST_CHECK_EQUAL( AB.at( i,j ) , value_t( static_cast< inner_type_t >(nf * nd) ) ); + } } + } - { - auto A = tensor_type{4,3}; - auto B = tensor_type{3,4,2}; + { + auto A = tensor_t{{4,3}}; + auto B = tensor_t{3,4,2}; - for(auto j = 0u; j < A.extents().at(1); ++j) - for(auto i = 0u; i < A.extents().at(0); ++i) - A.at( i,j ) = value_type( static_cast< inner_type_t >(i+1) ); + for(auto j = 0u; j < A.extents().at(1); ++j){ + for(auto i = 0u; i < A.extents().at(0); ++i){ + A.at( i,j ) = value_t( static_cast< inner_type_t >(i+1) ); + } + } - for(auto k = 0u; k < B.extents().at(2); ++k) - for(auto j = 0u; j < B.extents().at(1); ++j) - for(auto i = 0u; i < B.extents().at(0); ++i) - B.at( i,j,k ) = value_type( static_cast< inner_type_t >(i+1) ); - auto AB = A(_d,_f) * B(_f,_d,_); + for(auto k = 0u; k < B.extents().at(2); ++k){ + for(auto j = 0u; j < B.extents().at(1); ++j){ + for(auto i = 0u; i < B.extents().at(0); ++i){ + B.at( i,j,k ) = value_t( static_cast< inner_type_t >(i+1) ); + } + } + } - // n*(n+1)/2; - auto const nf = ( B.extents().at(0) * (B.extents().at(0)+1) / 2 ); - auto const nd = ( A.extents().at(0) * (A.extents().at(0)+1) / 2 ); + auto AB = A(_d,_f) * B(_f,_d,_); - for(auto i = 0u; i < AB.extents().at(0); ++i) - BOOST_CHECK_EQUAL ( AB.at( i ) , value_type( static_cast< inner_type_t >(nf * nd) ) ); + // n*(n+1)/2; + auto const nf = ( B.extents().at(0) * (B.extents().at(0)+1) / 2 ); + auto const nd = ( A.extents().at(0) * (A.extents().at(0)+1) / 2 ); + for(auto i = 0u; i < AB.extents().at(0); ++i){ + BOOST_CHECK_EQUAL ( AB.at( i ) , value_t( static_cast< inner_type_t >(nf * nd) ) ); } + + } } BOOST_AUTO_TEST_SUITE_END() diff --git a/test/tensor/test_expression.cpp b/test/tensor/test_expression.cpp index 64ce969df..3d884e72c 100644 --- a/test/tensor/test_expression.cpp +++ b/test/tensor/test_expression.cpp @@ -11,7 +11,7 @@ - +#include #include #include #include @@ -28,49 +28,53 @@ using test_types = zip>::with_t; - fixture() - : extents { - extents_type{}, // 0 - - extents_type{1,1}, // 1 - extents_type{1,2}, // 2 - extents_type{2,1}, // 3 - - extents_type{2,3}, // 4 - extents_type{2,3,1}, // 5 - extents_type{1,2,3}, // 6 - extents_type{1,1,2,3}, // 7 - extents_type{1,2,3,1,1}, // 8 - - extents_type{4,2,3}, // 9 - extents_type{4,2,1,3}, // 10 - extents_type{4,2,1,3,1}, // 11 - extents_type{1,4,2,1,3,1} } // 12 + + const std::vector extents { - } - std::vector extents; +// extents_type{ }, // 0 + + extents_type{1,1}, // 1 + extents_type{1,2}, // 2 + extents_type{2,1}, // 3 + + extents_type{2,3}, // 4 + extents_type{2,3,1}, // 5 + extents_type{1,2,3}, // 6 + extents_type{1,1,2,3}, // 7 + extents_type{1,2,3,1,1}, // 8 + + extents_type{4,2,3}, // 9 + extents_type{4,2,1,3}, // 10 + extents_type{4,2,1,3,1}, // 11 + extents_type{1,4,2,1,3,1} // 12 + }; }; 
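For reference, a compact sketch of the Einstein-notation operator the rewritten test above exercises. It assumes a ublas::tensor_dynamic<float> alias (the template arguments are not visible in the hunks above) and uses arbitrary shapes and fill values.

#include <boost/numeric/ublas/tensor.hpp>

int main()
{
  namespace ublas = boost::numeric::ublas;
  // NOLINTNEXTLINE(google-build-using-namespace)
  using namespace boost::numeric::ublas::index;   // placeholders _, _d, _e, _f, ...

  auto A = ublas::tensor_dynamic<float>(5,3);
  auto B = ublas::tensor_dynamic<float>(3,4);
  for(auto& x : A) x = 1.0f;
  for(auto& x : B) x = 2.0f;

  // C(i,j) = sum_k A(i,k) * B(k,j): the repeated placeholder _e is contracted
  auto C = A(_,_e) * B(_e,_);

  // two contracted modes at once: D(i,j) = sum_{d,f} A2(d,i,f) * B2(f,d,j)
  auto A2 = ublas::tensor_dynamic<float>(4,5,3);
  auto B2 = ublas::tensor_dynamic<float>(3,4,2);
  auto D  = A2(_d,_,_f) * B2(_f,_d,_);

  return (C.at(0,0) == 6.0f && D.at(0,0) > 0.0f) ? 0 : 1;
}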
BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_expression_access, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using tensor_expression_type = typename tensor_type::super_type; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + using tensor_t = ublas::tensor_dynamic; + using expression_t = typename tensor_t::super_type; for(auto const& e : extents) { - auto v = value_type{}; - auto t = tensor_type(e); + if(!ublas::is_valid(e)){ + continue; + } - for(auto& tt: t){ tt = v; v+=value_type{1}; } - const auto& tensor_expression_const = static_cast( t ); + auto v = value_t{}; + auto t = tensor_t(e); - for(auto i = 0ul; i < t.size(); ++i) + for(auto& tt: t){ tt = v; v+=value_t{1}; } + const auto& tensor_expression_const = static_cast( t ); + + for(auto i = 0ul; i < t.size(); ++i){ BOOST_CHECK_EQUAL( tensor_expression_const()(i), t(i) ); + } } } @@ -79,36 +83,39 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_expression_access, value, test_ty BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_unary_expression, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + using tensor_t = ublas::tensor_dynamic; - auto uplus1 = std::bind( std::plus{}, std::placeholders::_1, value_type(1) ); + auto uplus1 = [](auto const& a){ return a+value_t{1}; }; + //auto uplus1 = std::bind( std::plus{}, std::placeholders::_1, value_t(1) ); for(auto const& e : extents) { - auto t = tensor_type(e); - auto v = value_type{}; - for(auto& tt: t) { tt = v; v+=value_type{1}; } + auto t = tensor_t(e); + auto v = value_t{}; + for(auto& tt: t) { tt = v; v+=value_t{1}; } - const auto uexpr = ublas::detail::make_unary_tensor_expression( t, uplus1 ); + const auto uexpr = ublas::detail::make_unary_tensor_expression( t, uplus1 ); - for(auto i = 0ul; i < t.size(); ++i) + for(auto i = 0ul; i < t.size(); ++i){ BOOST_CHECK_EQUAL( uexpr(i), uplus1(t(i)) ); + } - auto uexpr_uexpr = ublas::detail::make_unary_tensor_expression( uexpr, uplus1 ); + auto uexpr_uexpr = ublas::detail::make_unary_tensor_expression( uexpr, uplus1 ); - for(auto i = 0ul; i < t.size(); ++i) + for(auto i = 0ul; i < t.size(); ++i){ BOOST_CHECK_EQUAL( uexpr_uexpr(i), uplus1(uplus1(t(i))) ); + } const auto & uexpr_e = uexpr.e; - BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(uexpr_e) >, tensor_type > ) ); + BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(uexpr_e) >, tensor_t > ) ); const auto & uexpr_uexpr_e_e = uexpr_uexpr.e.e; - BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(uexpr_uexpr_e_e) >, tensor_type > ) ); + BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(uexpr_uexpr_e_e) >, tensor_t > ) ); } @@ -117,52 +124,58 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_unary_expression, value, test_typ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_binary_expression, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - - auto uplus1 = std::bind( std::plus{}, 
std::placeholders::_1, value_type(1) ); - auto uplus2 = std::bind( std::plus{}, std::placeholders::_1, value_type(2) ); - auto bplus = std::plus {}; - auto bminus = std::minus{}; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + using tensor_t = ublas::tensor_dynamic; + + auto uplus1 = [](auto const& a){ return a+value_t{1}; }; + auto uplus2 = [](auto const& a){ return a+value_t{2}; }; + //auto uplus1 = std::bind( std::plus{}, std::placeholders::_1, value_t(1) ); + //auto uplus2 = std::bind( std::plus{}, std::placeholders::_1, value_t(2) ); + auto bplus = std::plus {}; + auto bminus = std::minus{}; for(auto const& e : extents) { - auto t = tensor_type(e); - auto v = value_type{}; - for(auto& tt: t){ tt = v; v+=value_type{1}; } + auto t = tensor_t(e); + auto v = value_t{}; + for(auto& tt: t){ tt = v; v+=value_t{1}; } - auto uexpr1 = ublas::detail::make_unary_tensor_expression( t, uplus1 ); - auto uexpr2 = ublas::detail::make_unary_tensor_expression( t, uplus2 ); + auto uexpr1 = ublas::detail::make_unary_tensor_expression( t, uplus1 ); + auto uexpr2 = ublas::detail::make_unary_tensor_expression( t, uplus2 ); - BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(uexpr1.e) >, tensor_type > ) ); - BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(uexpr2.e) >, tensor_type > ) ); + BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(uexpr1.e) >, tensor_t > ) ); + BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(uexpr2.e) >, tensor_t > ) ); - for(auto i = 0ul; i < t.size(); ++i) + for(auto i = 0ul; i < t.size(); ++i){ BOOST_CHECK_EQUAL( uexpr1(i), uplus1(t(i)) ); + } - for(auto i = 0ul; i < t.size(); ++i) + for(auto i = 0ul; i < t.size(); ++i){ BOOST_CHECK_EQUAL( uexpr2(i), uplus2(t(i)) ); + } - auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); + auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); - BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(bexpr_uexpr.el.e) >, tensor_type > ) ); - BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(bexpr_uexpr.er.e) >, tensor_type > ) ); + BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(bexpr_uexpr.el.e) >, tensor_t > ) ); + BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(bexpr_uexpr.er.e) >, tensor_t > ) ); - for(auto i = 0ul; i < t.size(); ++i) + for(auto i = 0ul; i < t.size(); ++i){ BOOST_CHECK_EQUAL( bexpr_uexpr(i), bplus(uexpr1(i),uexpr2(i)) ); + } - auto bexpr_bexpr_uexpr = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t, bminus ); + auto bexpr_bexpr_uexpr = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t, bminus ); - BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(bexpr_bexpr_uexpr.el.el.e) >, tensor_type > ) ); - BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(bexpr_bexpr_uexpr.el.er.e) >, tensor_type > ) ); - BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(bexpr_bexpr_uexpr.er) >, tensor_type > ) ); - BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(bexpr_bexpr_uexpr.er) >, tensor_type > ) ); + BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(bexpr_bexpr_uexpr.el.el.e) >, tensor_t > ) ); + BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(bexpr_bexpr_uexpr.el.er.e) >, tensor_t > ) ); + BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(bexpr_bexpr_uexpr.er) >, tensor_t > ) ); + BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(bexpr_bexpr_uexpr.er) >, tensor_t > ) ); - for(auto i = 0ul; i 
< t.size(); ++i) + for(auto i = 0ul; i < t.size(); ++i){ BOOST_CHECK_EQUAL( bexpr_bexpr_uexpr(i), bminus(bexpr_uexpr(i),t(i)) ); + } } diff --git a/test/tensor/test_expression_evaluation.cpp b/test/tensor/test_expression_evaluation.cpp index 648134998..5863aa963 100644 --- a/test/tensor/test_expression_evaluation.cpp +++ b/test/tensor/test_expression_evaluation.cpp @@ -12,64 +12,63 @@ + #include #include +#include #include #include "utility.hpp" #include #include -BOOST_AUTO_TEST_SUITE(test_tensor_expression); - +BOOST_AUTO_TEST_SUITE(test_tensor_expression) using test_types = zip>::with_t; - struct fixture { - using extents_type = boost::numeric::ublas::extents<>; - fixture() - : extents{ - extents_type{}, // 0 - - extents_type{1,1}, // 1 - extents_type{1,2}, // 2 - extents_type{2,1}, // 3 - - extents_type{2,3}, // 4 - extents_type{2,3,1}, // 5 - extents_type{1,2,3}, // 6 - extents_type{1,1,2,3}, // 7 - extents_type{1,2,3,1,1}, // 8 - - extents_type{4,2,3}, // 9 - extents_type{4,2,1,3}, // 10 - extents_type{4,2,1,3,1}, // 11 - extents_type{1,4,2,1,3,1}} // 12 + using extents_t = boost::numeric::ublas::extents<>; + + const std::vector extents = { - } - std::vector extents; +// extents_t{}, // 0 + + extents_t{1,1}, // 1 + extents_t{1,2}, // 2 + extents_t{2,1}, // 3 + + extents_t{2,3}, // 4 + extents_t{2,3,1}, // 5 + extents_t{1,2,3}, // 6 + extents_t{1,1,2,3}, // 7 + extents_t{1,2,3,1,1}, // 8 + + extents_t{4,2,3}, // 9 + extents_t{4,2,1,3}, // 10 + extents_t{4,2,1,3,1}, // 11 + extents_t{1,4,2,1,3,1} // 12 + }; }; BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_expression_retrieve_extents, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - - auto uplus1 = std::bind( std::plus{}, std::placeholders::_1, value_type(1) ); - auto uplus2 = std::bind( std::plus{}, value_type(2), std::placeholders::_2 ); - auto bplus = std::plus {}; - auto bminus = std::minus{}; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + using tensor_t = ublas::tensor_dynamic; + + auto uplus1 = [](auto const& a){ return a + value_t(1); }; + auto uplus2 = [](auto const& a){ return value_t(2) + a; }; + auto bplus = std::plus {}; + auto bminus = std::minus{}; for(auto const& e : extents) { - auto t = tensor_type(e); - auto v = value_type{}; - for(auto& tt: t){ tt = v; v+=value_type{1}; } + auto t = tensor_t(e); + auto v = value_t{}; + for(auto& tt: t){ tt = v; v+=value_t{1}; } BOOST_CHECK( ublas::detail::retrieve_extents( t ) == e ); @@ -77,20 +76,20 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_expression_retrieve_extents, value // uexpr1 = t+1 // uexpr2 = 2+t - auto uexpr1 = ublas::detail::make_unary_tensor_expression( t, uplus1 ); - auto uexpr2 = ublas::detail::make_unary_tensor_expression( t, uplus2 ); + auto uexpr1 = ublas::detail::make_unary_tensor_expression( t, uplus1 ); + auto uexpr2 = ublas::detail::make_unary_tensor_expression( t, uplus2 ); BOOST_CHECK( ublas::detail::retrieve_extents( uexpr1 ) == e ); BOOST_CHECK( ublas::detail::retrieve_extents( uexpr2 ) == e ); // bexpr_uexpr = (t+1) + (2+t) - auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); + auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_uexpr ) == e ); // bexpr_bexpr_uexpr = ((t+1) 
+ (2+t)) - t - auto bexpr_bexpr_uexpr = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t, bminus ); + auto bexpr_bexpr_uexpr = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t, bminus ); BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_bexpr_uexpr ) == e ); @@ -100,39 +99,39 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_expression_retrieve_extents, value for(auto i = 0u; i < extents.size()-1; ++i) { - auto v = value_type{}; + auto v = value_t{}; - auto t1 = tensor_type(extents[i]); - for(auto& tt: t1){ tt = v; v+=value_type{1}; } + auto t1 = tensor_t(extents[i]); + for(auto& tt: t1){ tt = v; v+=value_t{1}; } - auto t2 = tensor_type(extents[i+1]); - for(auto& tt: t2){ tt = v; v+=value_type{2}; } + auto t2 = tensor_t(extents[i+1]); + for(auto& tt: t2){ tt = v; v+=value_t{2}; } BOOST_CHECK( ublas::detail::retrieve_extents( t1 ) != ublas::detail::retrieve_extents( t2 ) ); // uexpr1 = t1+1 // uexpr2 = 2+t2 - auto uexpr1 = ublas::detail::make_unary_tensor_expression( t1, uplus1 ); - auto uexpr2 = ublas::detail::make_unary_tensor_expression( t2, uplus2 ); + auto uexpr1 = ublas::detail::make_unary_tensor_expression( t1, uplus1 ); + auto uexpr2 = ublas::detail::make_unary_tensor_expression( t2, uplus2 ); BOOST_CHECK( ublas::detail::retrieve_extents( t1 ) == ublas::detail::retrieve_extents( uexpr1 ) ); BOOST_CHECK( ublas::detail::retrieve_extents( t2 ) == ublas::detail::retrieve_extents( uexpr2 ) ); BOOST_CHECK( ublas::detail::retrieve_extents( uexpr1 ) != ublas::detail::retrieve_extents( uexpr2 ) ); // bexpr_uexpr = (t1+1) + (2+t2) - auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); + auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_uexpr ) == ublas::detail::retrieve_extents(t1) ); // bexpr_bexpr_uexpr = ((t1+1) + (2+t2)) - t2 - auto bexpr_bexpr_uexpr1 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t2, bminus ); + auto bexpr_bexpr_uexpr1 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t2, bminus ); BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_bexpr_uexpr1 ) == ublas::detail::retrieve_extents(t2) ); // bexpr_bexpr_uexpr = t2 - ((t1+1) + (2+t2)) - auto bexpr_bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( t2, bexpr_uexpr, bminus ); + auto bexpr_bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( t2, bexpr_uexpr, bminus ); BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_bexpr_uexpr2 ) == ublas::detail::retrieve_extents(t2) ); } @@ -146,22 +145,21 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_expression_retrieve_extents, value BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_expression_all_extents_equal, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + using tensor_t = ublas::tensor_dynamic; - - auto uplus1 = std::bind( std::plus{}, std::placeholders::_1, value_type(1) ); - auto uplus2 = std::bind( std::plus{}, value_type(2), std::placeholders::_2 ); - auto bplus = std::plus {}; - auto bminus = std::minus{}; + auto uplus1 = [](auto const& a){ return a + value_t(1); }; + auto uplus2 = [](auto const& a){ return value_t(2) + a; }; + auto bplus = std::plus {}; + auto bminus = std::minus{}; 
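The expression tests above build the lazy expression objects directly through the detail helpers. A small sketch of that idiom follows; the result-tensor type is passed as an explicit template argument on the assumption that angle-bracketed arguments were elided in the hunks above, so treat the exact spelling as illustrative rather than definitive.

#include <boost/numeric/ublas/tensor.hpp>
#include <functional>

int main()
{
  namespace ublas = boost::numeric::ublas;
  using tensor_t  = ublas::tensor_dynamic<float>;

  auto t = tensor_t(2,3);
  auto v = 0.0f;
  for(auto& x : t){ x = v; v += 1.0f; }

  auto plus1 = [](auto const& a){ return a + 1.0f; };

  // uexpr(i) == t(i) + 1 -- nothing is evaluated until the expression is indexed
  auto uexpr = ublas::detail::make_unary_tensor_expression<tensor_t>(t, plus1);

  // bexpr(i) == (t(i) + 1) + t(i)
  auto bexpr = ublas::detail::make_binary_tensor_expression<tensor_t>(uexpr, t, std::plus<>{});

  return bexpr(0) == 1.0f ? 0 : 1;
}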
for(auto const& e : extents) { - auto t = tensor_type(e); - auto v = value_type{}; - for(auto& tt: t){ tt = v; v+=value_type{1}; } + auto t = tensor_t(e); + auto v = value_t{}; + for(auto& tt: t){ tt = v; v+=value_t{1}; } BOOST_CHECK( ublas::detail::all_extents_equal( t , e ) ); @@ -169,20 +167,20 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_expression_all_extents_equal, valu // uexpr1 = t+1 // uexpr2 = 2+t - auto uexpr1 = ublas::detail::make_unary_tensor_expression( t, uplus1 ); - auto uexpr2 = ublas::detail::make_unary_tensor_expression( t, uplus2 ); + auto uexpr1 = ublas::detail::make_unary_tensor_expression( t, uplus1 ); + auto uexpr2 = ublas::detail::make_unary_tensor_expression( t, uplus2 ); BOOST_CHECK( ublas::detail::all_extents_equal( uexpr1, e ) ); BOOST_CHECK( ublas::detail::all_extents_equal( uexpr2, e ) ); // bexpr_uexpr = (t+1) + (2+t) - auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); + auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); BOOST_CHECK( ublas::detail::all_extents_equal( bexpr_uexpr, e ) ); // bexpr_bexpr_uexpr = ((t+1) + (2+t)) - t - auto bexpr_bexpr_uexpr = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t, bminus ); + auto bexpr_bexpr_uexpr = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t, bminus ); BOOST_CHECK( ublas::detail::all_extents_equal( bexpr_bexpr_uexpr , e ) ); @@ -192,52 +190,52 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_expression_all_extents_equal, valu for(auto i = 0u; i < extents.size()-1; ++i) { - auto v = value_type{}; + auto v = value_t{}; - auto t1 = tensor_type(extents[i]); - for(auto& tt: t1){ tt = v; v+=value_type{1}; } + auto t1 = tensor_t(extents[i]); + for(auto& tt: t1){ tt = v; v+=value_t{1}; } - auto t2 = tensor_type(extents[i+1]); - for(auto& tt: t2){ tt = v; v+=value_type{2}; } + auto t2 = tensor_t(extents[i+1]); + for(auto& tt: t2){ tt = v; v+=value_t{2}; } BOOST_CHECK( ublas::detail::all_extents_equal( t1, ublas::detail::retrieve_extents(t1) ) ); BOOST_CHECK( ublas::detail::all_extents_equal( t2, ublas::detail::retrieve_extents(t2) ) ); // uexpr1 = t1+1 // uexpr2 = 2+t2 - auto uexpr1 = ublas::detail::make_unary_tensor_expression( t1, uplus1 ); - auto uexpr2 = ublas::detail::make_unary_tensor_expression( t2, uplus2 ); + auto uexpr1 = ublas::detail::make_unary_tensor_expression( t1, uplus1 ); + auto uexpr2 = ublas::detail::make_unary_tensor_expression( t2, uplus2 ); BOOST_CHECK( ublas::detail::all_extents_equal( uexpr1, ublas::detail::retrieve_extents(uexpr1) ) ); BOOST_CHECK( ublas::detail::all_extents_equal( uexpr2, ublas::detail::retrieve_extents(uexpr2) ) ); // bexpr_uexpr = (t1+1) + (2+t2) - auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); + auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_uexpr, ublas::detail::retrieve_extents( bexpr_uexpr ) ) ); // bexpr_bexpr_uexpr = ((t1+1) + (2+t2)) - t2 - auto bexpr_bexpr_uexpr1 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t2, bminus ); + auto bexpr_bexpr_uexpr1 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t2, bminus ); BOOST_CHECK( ! 
ublas::detail::all_extents_equal( bexpr_bexpr_uexpr1, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr1 ) ) ); // bexpr_bexpr_uexpr = t2 - ((t1+1) + (2+t2)) - auto bexpr_bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( t2, bexpr_uexpr, bminus ); + auto bexpr_bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( t2, bexpr_uexpr, bminus ); BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_bexpr_uexpr2, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr2 ) ) ); // bexpr_uexpr2 = (t1+1) + t2 - auto bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( uexpr1, t2, bplus ); + auto bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( uexpr1, t2, bplus ); BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_uexpr2, ublas::detail::retrieve_extents( bexpr_uexpr2 ) ) ); // bexpr_uexpr2 = ((t1+1) + t2) + t1 - auto bexpr_bexpr_uexpr3 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr2, t1, bplus ); + auto bexpr_bexpr_uexpr3 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr2, t1, bplus ); BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_bexpr_uexpr3, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr3 ) ) ); // bexpr_uexpr2 = t1 + (((t1+1) + t2) + t1) - auto bexpr_bexpr_uexpr4 = ublas::detail::make_binary_tensor_expression( t1, bexpr_bexpr_uexpr3, bplus ); + auto bexpr_bexpr_uexpr4 = ublas::detail::make_binary_tensor_expression( t1, bexpr_bexpr_uexpr3, bplus ); BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_bexpr_uexpr4, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr4 ) ) ); } diff --git a/test/tensor/test_extents.cpp b/test/tensor/test_extents.cpp deleted file mode 100644 index 2dd9257ff..000000000 --- a/test/tensor/test_extents.cpp +++ /dev/null @@ -1,731 +0,0 @@ -// -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// - -#include -#include -#include -#include -#include - -BOOST_AUTO_TEST_SUITE ( test_extents ) - - -//*boost::unit_test::label("extents") -//*boost::unit_test::label("constructor") - -BOOST_AUTO_TEST_CASE(test_extents_ctor) -{ - using namespace boost::numeric; - using extents = ublas::basic_extents; - - - auto e0 = extents{}; - BOOST_CHECK( e0.empty()); - BOOST_CHECK ( e0.size() == 0 ); - - auto e1 = extents{1,1}; - BOOST_CHECK(!e1.empty()); - BOOST_CHECK ( e1.size() == 2 ); - - auto e2 = extents{1,2}; - BOOST_CHECK(!e2.empty()); - BOOST_CHECK ( e2.size() == 2 ); - - auto e3 = extents{2,1}; - BOOST_CHECK (!e3.empty()); - BOOST_CHECK ( e3.size() == 2 ); - - auto e4 = extents{2,3}; - BOOST_CHECK(!e4.empty()); - BOOST_CHECK ( e4.size() == 2 ); - - auto e5 = extents{2,3,1}; - BOOST_CHECK (!e5.empty()); - BOOST_CHECK ( e5.size() == 3 ); - - auto e6 = extents{1,2,3}; // 6 - BOOST_CHECK(!e6.empty()); - BOOST_CHECK ( e6.size() == 3 ); - - auto e7 = extents{4,2,3}; // 7 - BOOST_CHECK(!e7.empty()); - BOOST_CHECK ( e7.size() == 3 ); - - BOOST_CHECK_THROW( extents({1,0}), std::length_error); - BOOST_CHECK_THROW( extents({0} ), std::length_error); - BOOST_CHECK_THROW( extents({3} ), std::length_error); - BOOST_CHECK_THROW( extents({0,1}), std::length_error); -} - -BOOST_AUTO_TEST_CASE(test_static_rank_extents_ctor) -{ - namespace ub = boost::numeric::ublas; - - - auto e0 = ub::extents<0>{}; - BOOST_CHECK( e0.empty()); - BOOST_CHECK ( e0.size() == 0); - - auto e1 = ub::extents<2>{1,1}; - BOOST_CHECK(!e1.empty()); - BOOST_CHECK ( e1.size() == 2); - - auto e2 = ub::extents<2>{1,2}; - BOOST_CHECK(!e2.empty()); - BOOST_CHECK ( e2.size() == 2); - - auto e3 = ub::extents<2>{2,1}; - BOOST_CHECK (!e3.empty()); - BOOST_CHECK ( e3.size() == 2); - - auto e4 = ub::extents<2>{2,3}; - BOOST_CHECK(!e4.empty()); - BOOST_CHECK ( e4.size() == 2); - - auto e5 = ub::extents<3>{2,3,1}; - BOOST_CHECK (!e5.empty()); - BOOST_CHECK ( e5.size() == 3); - - auto e6 = ub::extents<3>{1,2,3}; // 6 - BOOST_CHECK(!e6.empty()); - BOOST_CHECK ( e6.size() == 3); - - auto e7 = ub::extents<3>{4,2,3}; // 7 - BOOST_CHECK(!e7.empty()); - BOOST_CHECK ( e7.size() == 3); - - BOOST_CHECK_THROW( ub::extents<2>({1,0}), std::length_error); - BOOST_CHECK_THROW( ub::extents<1>({0} ), std::length_error); - BOOST_CHECK_THROW( ub::extents<1>({3} ), std::length_error); - BOOST_CHECK_THROW( ub::extents<2>({0,1}), std::length_error); - BOOST_CHECK_THROW( ub::extents<2>({1,1,2}), std::out_of_range); -} - - -struct fixture { - using extents_type = boost::numeric::ublas::extents<>; - template - using static_rank_extents_type = boost::numeric::ublas::extents; - - fixture() : extents{ - extents_type{}, // 0 - - extents_type{1,1}, // 1 - extents_type{1,2}, // 2 - extents_type{2,1}, // 3 - - extents_type{2,3}, // 4 - extents_type{2,3,1}, // 5 - extents_type{1,2,3}, // 6 - extents_type{1,1,2,3}, // 7 - extents_type{1,2,3,1,1}, // 8 - - extents_type{4,2,3}, // 9 - extents_type{4,2,1,3}, // 10 - extents_type{4,2,1,3,1}, // 11 - extents_type{1,4,2,1,3,1}, // 12 - - extents_type{1,4,1}, // 13 - extents_type{1,1,1,1}, // 14 - extents_type{1,4,1,1,1}, // 15 - extents_type{1,1,2,1,1,1}, // 16 - extents_type{1,1,2,3,1,1}, // 17 - } - {} - std::vector extents; -}; - -BOOST_FIXTURE_TEST_CASE(test_extents_access, fixture, *boost::unit_test::label("extents") 
*boost::unit_test::label("access")) -{ - using namespace boost::numeric; - - BOOST_REQUIRE_EQUAL(extents.size(),18); - - BOOST_CHECK_EQUAL (extents[ 0].size(), 0); - BOOST_CHECK (extents[ 0].empty() ); - - BOOST_REQUIRE_EQUAL(extents[ 1].size(), 2); - BOOST_REQUIRE_EQUAL(extents[ 2].size(), 2); - BOOST_REQUIRE_EQUAL(extents[ 3].size(), 2); - BOOST_REQUIRE_EQUAL(extents[ 4].size(), 2); - BOOST_REQUIRE_EQUAL(extents[ 5].size(), 3); - BOOST_REQUIRE_EQUAL(extents[ 6].size(), 3); - BOOST_REQUIRE_EQUAL(extents[ 7].size(), 4); - BOOST_REQUIRE_EQUAL(extents[ 8].size(), 5); - BOOST_REQUIRE_EQUAL(extents[ 9].size(), 3); - BOOST_REQUIRE_EQUAL(extents[10].size(), 4); - BOOST_REQUIRE_EQUAL(extents[11].size(), 5); - BOOST_REQUIRE_EQUAL(extents[12].size(), 6); - BOOST_REQUIRE_EQUAL(extents[13].size(), 3); - BOOST_REQUIRE_EQUAL(extents[14].size(), 4); - BOOST_REQUIRE_EQUAL(extents[15].size(), 5); - BOOST_REQUIRE_EQUAL(extents[16].size(), 6); - BOOST_REQUIRE_EQUAL(extents[17].size(), 6); - - - BOOST_CHECK_EQUAL(extents[1][0],1); - BOOST_CHECK_EQUAL(extents[1][1],1); - - BOOST_CHECK_EQUAL(extents[2][0],1); - BOOST_CHECK_EQUAL(extents[2][1],2); - - BOOST_CHECK_EQUAL(extents[3][0],2); - BOOST_CHECK_EQUAL(extents[3][1],1); - - BOOST_CHECK_EQUAL(extents[4][0],2); - BOOST_CHECK_EQUAL(extents[4][1],3); - - BOOST_CHECK_EQUAL(extents[5][0],2); - BOOST_CHECK_EQUAL(extents[5][1],3); - BOOST_CHECK_EQUAL(extents[5][2],1); - - BOOST_CHECK_EQUAL(extents[6][0],1); - BOOST_CHECK_EQUAL(extents[6][1],2); - BOOST_CHECK_EQUAL(extents[6][2],3); - - BOOST_CHECK_EQUAL(extents[7][0],1); - BOOST_CHECK_EQUAL(extents[7][1],1); - BOOST_CHECK_EQUAL(extents[7][2],2); - BOOST_CHECK_EQUAL(extents[7][3],3); - - BOOST_CHECK_EQUAL(extents[8][0],1); - BOOST_CHECK_EQUAL(extents[8][1],2); - BOOST_CHECK_EQUAL(extents[8][2],3); - BOOST_CHECK_EQUAL(extents[8][3],1); - BOOST_CHECK_EQUAL(extents[8][4],1); - - BOOST_CHECK_EQUAL(extents[9][0],4); - BOOST_CHECK_EQUAL(extents[9][1],2); - BOOST_CHECK_EQUAL(extents[9][2],3); - - BOOST_CHECK_EQUAL(extents[10][0],4); - BOOST_CHECK_EQUAL(extents[10][1],2); - BOOST_CHECK_EQUAL(extents[10][2],1); - BOOST_CHECK_EQUAL(extents[10][3],3); - - BOOST_CHECK_EQUAL(extents[11][0],4); - BOOST_CHECK_EQUAL(extents[11][1],2); - BOOST_CHECK_EQUAL(extents[11][2],1); - BOOST_CHECK_EQUAL(extents[11][3],3); - BOOST_CHECK_EQUAL(extents[11][4],1); - - BOOST_CHECK_EQUAL(extents[12][0],1); - BOOST_CHECK_EQUAL(extents[12][1],4); - BOOST_CHECK_EQUAL(extents[12][2],2); - BOOST_CHECK_EQUAL(extents[12][3],1); - BOOST_CHECK_EQUAL(extents[12][4],3); - BOOST_CHECK_EQUAL(extents[12][5],1); - - BOOST_CHECK_EQUAL(extents[13][0],1); - BOOST_CHECK_EQUAL(extents[13][1],4); - BOOST_CHECK_EQUAL(extents[13][2],1); - - BOOST_CHECK_EQUAL(extents[14][0],1); - BOOST_CHECK_EQUAL(extents[14][1],1); - BOOST_CHECK_EQUAL(extents[14][2],1); - BOOST_CHECK_EQUAL(extents[14][3],1); - - BOOST_CHECK_EQUAL(extents[15][0],1); - BOOST_CHECK_EQUAL(extents[15][1],4); - BOOST_CHECK_EQUAL(extents[15][2],1); - BOOST_CHECK_EQUAL(extents[15][3],1); - BOOST_CHECK_EQUAL(extents[15][4],1); - - BOOST_CHECK_EQUAL(extents[16][0],1); - BOOST_CHECK_EQUAL(extents[16][1],1); - BOOST_CHECK_EQUAL(extents[16][2],2); - BOOST_CHECK_EQUAL(extents[16][3],1); - BOOST_CHECK_EQUAL(extents[16][4],1); - BOOST_CHECK_EQUAL(extents[16][5],1); - - BOOST_CHECK_EQUAL(extents[17][0],1); - BOOST_CHECK_EQUAL(extents[17][1],1); - BOOST_CHECK_EQUAL(extents[17][2],2); - BOOST_CHECK_EQUAL(extents[17][3],3); - BOOST_CHECK_EQUAL(extents[17][4],1); - BOOST_CHECK_EQUAL(extents[17][5],1); -} - 
-BOOST_FIXTURE_TEST_CASE(test_extents_copy_ctor, fixture, *boost::unit_test::label("extents") *boost::unit_test::label("copy_ctor")) -{ - BOOST_REQUIRE_EQUAL(extents.size(),18); - - auto e0 = extents[ 0]; // {} - auto e1 = extents[ 1]; // {1,1} - auto e2 = extents[ 2]; // {1,2} - auto e3 = extents[ 3]; // {2,1} - auto e4 = extents[ 4]; // {2,3} - auto e5 = extents[ 5]; // {2,3,1} - auto e6 = extents[ 6]; // {1,2,3} - auto e7 = extents[ 7]; // {1,1,2,3} - auto e8 = extents[ 8]; // {1,2,3,1,1} - auto e9 = extents[ 9]; // {4,2,3} - auto e10 = extents[10]; // {4,2,1,3} - auto e11 = extents[11]; // {4,2,1,3,1} - auto e12 = extents[12]; // {1,4,2,1,3,1} - auto e13 = extents[13]; // {1,4,1} - auto e14 = extents[14]; // {1,1,1,1} - auto e15 = extents[15]; // {1,4,1,1,1} - auto e16 = extents[16]; // {1,1,2,1,1,1} - auto e17 = extents[17]; // {1,1,2,3,1,1} - - BOOST_CHECK_EQUAL (e0.size(), 0); - BOOST_CHECK (e0.empty() ); - - BOOST_REQUIRE_EQUAL(e1 .size(), 2); - BOOST_REQUIRE_EQUAL(e2 .size(), 2); - BOOST_REQUIRE_EQUAL(e3 .size(), 2); - BOOST_REQUIRE_EQUAL(e4 .size(), 2); - BOOST_REQUIRE_EQUAL(e5 .size(), 3); - BOOST_REQUIRE_EQUAL(e6 .size(), 3); - BOOST_REQUIRE_EQUAL(e7 .size(), 4); - BOOST_REQUIRE_EQUAL(e8 .size(), 5); - BOOST_REQUIRE_EQUAL(e9 .size(), 3); - BOOST_REQUIRE_EQUAL(e10.size(), 4); - BOOST_REQUIRE_EQUAL(e11.size(), 5); - BOOST_REQUIRE_EQUAL(e12.size(), 6); - BOOST_REQUIRE_EQUAL(e13.size(), 3); - BOOST_REQUIRE_EQUAL(e14.size(), 4); - BOOST_REQUIRE_EQUAL(e15.size(), 5); - BOOST_REQUIRE_EQUAL(e16.size(), 6); - BOOST_REQUIRE_EQUAL(e17.size(), 6); - - - BOOST_CHECK_EQUAL(e1[0],1); - BOOST_CHECK_EQUAL(e1[1],1); - - BOOST_CHECK_EQUAL(e2[0],1); - BOOST_CHECK_EQUAL(e2[1],2); - - BOOST_CHECK_EQUAL(e3[0],2); - BOOST_CHECK_EQUAL(e3[1],1); - - BOOST_CHECK_EQUAL(e4[0],2); - BOOST_CHECK_EQUAL(e4[1],3); - - BOOST_CHECK_EQUAL(e5[0],2); - BOOST_CHECK_EQUAL(e5[1],3); - BOOST_CHECK_EQUAL(e5[2],1); - - BOOST_CHECK_EQUAL(e6[0],1); - BOOST_CHECK_EQUAL(e6[1],2); - BOOST_CHECK_EQUAL(e6[2],3); - - BOOST_CHECK_EQUAL(e7[0],1); - BOOST_CHECK_EQUAL(e7[1],1); - BOOST_CHECK_EQUAL(e7[2],2); - BOOST_CHECK_EQUAL(e7[3],3); - - BOOST_CHECK_EQUAL(e8[0],1); - BOOST_CHECK_EQUAL(e8[1],2); - BOOST_CHECK_EQUAL(e8[2],3); - BOOST_CHECK_EQUAL(e8[3],1); - BOOST_CHECK_EQUAL(e8[4],1); - - BOOST_CHECK_EQUAL(e9[0],4); - BOOST_CHECK_EQUAL(e9[1],2); - BOOST_CHECK_EQUAL(e9[2],3); - - BOOST_CHECK_EQUAL(e10[0],4); - BOOST_CHECK_EQUAL(e10[1],2); - BOOST_CHECK_EQUAL(e10[2],1); - BOOST_CHECK_EQUAL(e10[3],3); - - BOOST_CHECK_EQUAL(e11[0],4); - BOOST_CHECK_EQUAL(e11[1],2); - BOOST_CHECK_EQUAL(e11[2],1); - BOOST_CHECK_EQUAL(e11[3],3); - BOOST_CHECK_EQUAL(e11[4],1); - - BOOST_CHECK_EQUAL(e12[0],1); - BOOST_CHECK_EQUAL(e12[1],4); - BOOST_CHECK_EQUAL(e12[2],2); - BOOST_CHECK_EQUAL(e12[3],1); - BOOST_CHECK_EQUAL(e12[4],3); - BOOST_CHECK_EQUAL(e12[5],1); - - BOOST_CHECK_EQUAL(e13[0],1); - BOOST_CHECK_EQUAL(e13[1],4); - BOOST_CHECK_EQUAL(e13[2],1); - - BOOST_CHECK_EQUAL(e14[0],1); - BOOST_CHECK_EQUAL(e14[1],1); - BOOST_CHECK_EQUAL(e14[2],1); - BOOST_CHECK_EQUAL(e14[3],1); - - BOOST_CHECK_EQUAL(e15[0],1); - BOOST_CHECK_EQUAL(e15[1],4); - BOOST_CHECK_EQUAL(e15[2],1); - BOOST_CHECK_EQUAL(e15[3],1); - BOOST_CHECK_EQUAL(e15[4],1); - - BOOST_CHECK_EQUAL(e16[0],1); - BOOST_CHECK_EQUAL(e16[1],1); - BOOST_CHECK_EQUAL(e16[2],2); - BOOST_CHECK_EQUAL(e16[3],1); - BOOST_CHECK_EQUAL(e16[4],1); - BOOST_CHECK_EQUAL(e16[5],1); - - BOOST_CHECK_EQUAL(e17[0],1); - BOOST_CHECK_EQUAL(e17[1],1); - BOOST_CHECK_EQUAL(e17[2],2); - BOOST_CHECK_EQUAL(e17[3],3); - 
BOOST_CHECK_EQUAL(e17[4],1); - BOOST_CHECK_EQUAL(e17[5],1); - -} - -BOOST_FIXTURE_TEST_CASE(test_extents_is, fixture, *boost::unit_test::label("extents") *boost::unit_test::label("query")) -{ - BOOST_REQUIRE_EQUAL(extents.size(),18); - - auto e0 = extents[ 0]; // {} - auto e1 = extents[ 1]; // {1,1} - auto e2 = extents[ 2]; // {1,2} - auto e3 = extents[ 3]; // {2,1} - auto e4 = extents[ 4]; // {2,3} - auto e5 = extents[ 5]; // {2,3,1} - auto e6 = extents[ 6]; // {1,2,3} - auto e7 = extents[ 7]; // {1,1,2,3} - auto e8 = extents[ 8]; // {1,2,3,1,1} - auto e9 = extents[ 9]; // {4,2,3} - auto e10 = extents[10]; // {4,2,1,3} - auto e11 = extents[11]; // {4,2,1,3,1} - auto e12 = extents[12]; // {1,4,2,1,3,1} - auto e13 = extents[13]; // {1,4,1} - auto e14 = extents[14]; // {1,1,1,1} - auto e15 = extents[15]; // {1,4,1,1,1} - auto e16 = extents[16]; // {1,1,2,1,1,1} - auto e17 = extents[17]; // {1,1,2,3,1,1} - - BOOST_CHECK( e0.empty ( )); - BOOST_CHECK( ! is_scalar(e0)); - BOOST_CHECK( ! is_vector(e0)); - BOOST_CHECK( ! is_matrix(e0)); - BOOST_CHECK( ! is_tensor(e0)); - - BOOST_CHECK( ! e1.empty ( ) ); - BOOST_CHECK( is_scalar(e1) ); - BOOST_CHECK( ! is_vector(e1) ); - BOOST_CHECK( ! is_matrix(e1) ); - BOOST_CHECK( ! is_tensor(e1) ); - - BOOST_CHECK( ! e2.empty ( ) ); - BOOST_CHECK( ! is_scalar(e2) ); - BOOST_CHECK( is_vector(e2) ); - BOOST_CHECK( ! is_matrix(e2) ); - BOOST_CHECK( ! is_tensor(e2) ); - - BOOST_CHECK( ! e3.empty ( ) ); - BOOST_CHECK( ! is_scalar(e3) ); - BOOST_CHECK( is_vector(e3) ); - BOOST_CHECK( ! is_matrix(e3) ); - BOOST_CHECK( ! is_tensor(e3) ); - - BOOST_CHECK( ! e4.empty ( ) ); - BOOST_CHECK( ! is_scalar(e4) ); - BOOST_CHECK( ! is_vector(e4) ); - BOOST_CHECK( is_matrix(e4) ); - BOOST_CHECK( ! is_tensor(e4) ); - - BOOST_CHECK( ! e5.empty ( ) ); - BOOST_CHECK( ! is_scalar(e5) ); - BOOST_CHECK( ! is_vector(e5) ); - BOOST_CHECK( is_matrix(e5) ); - BOOST_CHECK( ! is_tensor(e5) ); - - BOOST_CHECK( ! e6.empty ( ) ); - BOOST_CHECK( ! is_scalar(e6) ); - BOOST_CHECK( ! is_vector(e6) ); - BOOST_CHECK( ! is_matrix(e6) ); - BOOST_CHECK( is_tensor(e6) ); - - BOOST_CHECK( ! e7.empty ( ) ); - BOOST_CHECK( ! is_scalar(e7) ); - BOOST_CHECK( ! is_vector(e7) ); - BOOST_CHECK( ! is_matrix(e7) ); - BOOST_CHECK( is_tensor(e7) ); - - BOOST_CHECK( ! e8.empty ( ) ); - BOOST_CHECK( ! is_scalar(e8) ); - BOOST_CHECK( ! is_vector(e8) ); - BOOST_CHECK( ! is_matrix(e8) ); - BOOST_CHECK( is_tensor(e8) ); - - BOOST_CHECK( ! e9.empty ( ) ); - BOOST_CHECK( ! is_scalar(e9) ); - BOOST_CHECK( ! is_vector(e9) ); - BOOST_CHECK( ! is_matrix(e9) ); - BOOST_CHECK( is_tensor(e9) ); - - BOOST_CHECK( ! e10.empty( ) ); - BOOST_CHECK( ! is_scalar(e10) ); - BOOST_CHECK( ! is_vector(e10) ); - BOOST_CHECK( ! is_matrix(e10) ); - BOOST_CHECK( is_tensor(e10) ); - - BOOST_CHECK( ! e11.empty( ) ); - BOOST_CHECK( ! is_scalar(e11) ); - BOOST_CHECK( ! is_vector(e11) ); - BOOST_CHECK( ! is_matrix(e11) ); - BOOST_CHECK( is_tensor(e11) ); - - BOOST_CHECK( ! e12.empty( ) ); - BOOST_CHECK( ! is_scalar(e12) ); - BOOST_CHECK( ! is_vector(e12) ); - BOOST_CHECK( ! is_matrix(e12) ); - BOOST_CHECK( is_tensor(e12) ); - - BOOST_CHECK( ! e13.empty( ) ); - BOOST_CHECK( ! is_scalar(e13) ); - BOOST_CHECK( is_vector(e13) ); - BOOST_CHECK( ! is_matrix(e13) ); - BOOST_CHECK( ! is_tensor(e13) ); - - BOOST_CHECK( ! e14.empty( ) ); - BOOST_CHECK( is_scalar(e14) ); - BOOST_CHECK( ! is_vector(e14) ); - BOOST_CHECK( ! is_matrix(e14) ); - BOOST_CHECK( ! is_tensor(e14) ); - - BOOST_CHECK( ! e15.empty( ) ); - BOOST_CHECK( ! 
is_scalar(e15) ); - BOOST_CHECK( is_vector(e15) ); - BOOST_CHECK( ! is_matrix(e15) ); - BOOST_CHECK( ! is_tensor(e15) ); - - BOOST_CHECK( ! e16.empty( ) ); - BOOST_CHECK( ! is_scalar(e16) ); - BOOST_CHECK( ! is_vector(e16) ); - BOOST_CHECK( ! is_matrix(e16) ); - BOOST_CHECK( is_tensor(e16) ); - - BOOST_CHECK( ! e17.empty( ) ); - BOOST_CHECK( ! is_scalar(e17) ); - BOOST_CHECK( ! is_vector(e17) ); - BOOST_CHECK( ! is_matrix(e17) ); - BOOST_CHECK( is_tensor(e17) ); -} - -BOOST_FIXTURE_TEST_CASE(test_extents_squeeze, fixture, *boost::unit_test::label("extents") *boost::unit_test::label("squeeze")) -{ - BOOST_REQUIRE_EQUAL(extents.size(),18); - - auto e0 = squeeze(extents[ 0]); // {} - auto e1 = squeeze(extents[ 1]); // {1,1} - auto e2 = squeeze(extents[ 2]); // {1,2} - auto e3 = squeeze(extents[ 3]); // {2,1} - - auto e4 = squeeze(extents[ 4]); // {2,3} - auto e5 = squeeze(extents[ 5]); // {2,3} - auto e6 = squeeze(extents[ 6]); // {2,3} - auto e7 = squeeze(extents[ 7]); // {2,3} - auto e8 = squeeze(extents[ 8]); // {2,3} - - auto e9 = squeeze(extents[ 9]); // {4,2,3} - auto e10 = squeeze(extents[10]); // {4,2,3} - auto e11 = squeeze(extents[11]); // {4,2,3} - auto e12 = squeeze(extents[12]); // {4,2,3} - - auto e13 = squeeze(extents[13]); // {1,4} - auto e14 = squeeze(extents[14]); // {1,1} - auto e15 = squeeze(extents[15]); // {1,4} - auto e16 = squeeze(extents[16]); // {2,1} - auto e17 = squeeze(extents[17]); // {2,3} - - BOOST_CHECK( (e0 == extents_type{} ) ); - BOOST_CHECK( (e1 == extents_type{1,1}) ); - BOOST_CHECK( (e2 == extents_type{1,2}) ); - BOOST_CHECK( (e3 == extents_type{2,1}) ); - - BOOST_CHECK( (e4 == extents_type{2,3}) ); - BOOST_CHECK( (e5 == extents_type{2,3}) ); - BOOST_CHECK( (e6 == extents_type{2,3}) ); - BOOST_CHECK( (e7 == extents_type{2,3}) ); - BOOST_CHECK( (e8 == extents_type{2,3}) ); - - BOOST_CHECK( (e9 == extents_type{4,2,3}) ); - BOOST_CHECK( (e10 == extents_type{4,2,3}) ); - BOOST_CHECK( (e11 == extents_type{4,2,3}) ); - BOOST_CHECK( (e12 == extents_type{4,2,3}) ); - - BOOST_CHECK( (e13 == extents_type{1,4}) ); - BOOST_CHECK( (e14 == extents_type{1,1}) ); - BOOST_CHECK( (e15 == extents_type{1,4}) ); - BOOST_CHECK( (e16 == extents_type{2,1}) ); - BOOST_CHECK( (e17 == extents_type{2,3}) ); - -} - -BOOST_FIXTURE_TEST_CASE(test_extents_comparison, fixture, *boost::unit_test::label("extents") *boost::unit_test::label("compare")) -{ - - using namespace boost::numeric; - - auto s0 = ublas::static_extents<>{}; - auto s1 = ublas::static_extents<1,1>{}; - auto s2 = ublas::static_extents<1,4,2,1,3,1>{}; - auto s3 = ublas::static_extents<1,4,2,1,1,1>{}; - - auto d0 = ublas::extents<0>{}; - auto d1 = ublas::extents<2>{1,1}; - auto d2 = ublas::extents<6>{1,4,2,1,3,1}; - auto d3 = ublas::extents<6>{1,4,2,1,1,1}; - - auto e0 = extents[ 0]; // {} - auto e1 = extents[ 1]; // {1,1} - auto e2 = extents[12]; // {1,4,2,1,3,1} - - // static_extents<...> == extents<> - BOOST_TEST( s0 == e0 ); - BOOST_TEST( s1 == e1 ); - BOOST_TEST( s2 == e2 ); - - BOOST_TEST( e0 == s0 ); - BOOST_TEST( e1 == s1 ); - BOOST_TEST( e2 == s2 ); - - BOOST_TEST( s0 != e1 ); - BOOST_TEST( s0 != e2 ); - BOOST_TEST( s1 != e0 ); - BOOST_TEST( s1 != e2 ); - BOOST_TEST( s2 != e0 ); - BOOST_TEST( s2 != e1 ); - BOOST_TEST( s3 != e0 ); - BOOST_TEST( s3 != e1 ); - BOOST_TEST( s3 != e2 ); - - BOOST_TEST( e1 != s0 ); - BOOST_TEST( e2 != s0 ); - BOOST_TEST( e0 != s1 ); - BOOST_TEST( e2 != s1 ); - BOOST_TEST( e0 != s2 ); - BOOST_TEST( e1 != s2 ); - BOOST_TEST( e0 != s3 ); - BOOST_TEST( e1 != s3 ); - BOOST_TEST( e2 != s3 ); 
- - // extents == extents<> - BOOST_TEST( d0 == e0 ); - BOOST_TEST( d1 == e1 ); - BOOST_TEST( d2 == e2 ); - - BOOST_TEST( e0 == d0 ); - BOOST_TEST( e1 == d1 ); - BOOST_TEST( e2 == d2 ); - - BOOST_TEST( d0 != e1 ); - BOOST_TEST( d0 != e2 ); - BOOST_TEST( d1 != e0 ); - BOOST_TEST( d1 != e2 ); - BOOST_TEST( d2 != e0 ); - BOOST_TEST( d2 != e1 ); - BOOST_TEST( d3 != e0 ); - BOOST_TEST( d3 != e1 ); - BOOST_TEST( d3 != e2 ); - - BOOST_TEST( e1 != d0 ); - BOOST_TEST( e2 != d0 ); - BOOST_TEST( e0 != d1 ); - BOOST_TEST( e2 != d1 ); - BOOST_TEST( e0 != d2 ); - BOOST_TEST( e1 != d2 ); - BOOST_TEST( e0 != d3 ); - BOOST_TEST( e1 != d3 ); - BOOST_TEST( e2 != d3 ); - - // static_extents<...> == extents - - BOOST_TEST( s0 == d0 ); - BOOST_TEST( s1 == d1 ); - BOOST_TEST( s2 == d2 ); - BOOST_TEST( s3 == d3 ); - - BOOST_TEST( d0 == s0 ); - BOOST_TEST( d1 == s1 ); - BOOST_TEST( d2 == s2 ); - BOOST_TEST( d3 == s3 ); - - BOOST_TEST( s0 != d1 ); - BOOST_TEST( s0 != d2 ); - BOOST_TEST( s0 != d3 ); - BOOST_TEST( s1 != d0 ); - BOOST_TEST( s1 != d2 ); - BOOST_TEST( s1 != d3 ); - BOOST_TEST( s2 != d0 ); - BOOST_TEST( s2 != d1 ); - BOOST_TEST( s2 != d3 ); - BOOST_TEST( s3 != d0 ); - BOOST_TEST( s3 != d1 ); - BOOST_TEST( s3 != d2 ); - - BOOST_TEST( d1 != s0 ); - BOOST_TEST( d2 != s0 ); - BOOST_TEST( d3 != s0 ); - BOOST_TEST( d0 != s1 ); - BOOST_TEST( d2 != s1 ); - BOOST_TEST( d3 != s1 ); - BOOST_TEST( d0 != s2 ); - BOOST_TEST( d1 != s2 ); - BOOST_TEST( d3 != s2 ); - BOOST_TEST( d0 != s3 ); - BOOST_TEST( d1 != s3 ); - BOOST_TEST( d2 != s3 ); - -} - - -BOOST_FIXTURE_TEST_CASE(test_extents_product, fixture, *boost::unit_test::label("extents") *boost::unit_test::label("product")) -{ - - auto e0 = product(extents[ 0]); // {} - auto e1 = product(extents[ 1]); // {1,1} - auto e2 = product(extents[ 2]); // {1,2} - auto e3 = product(extents[ 3]); // {2,1} - auto e4 = product(extents[ 4]); // {2,3} - auto e5 = product(extents[ 5]); // {2,3,1} - auto e6 = product(extents[ 6]); // {1,2,3} - auto e7 = product(extents[ 7]); // {1,1,2,3} - auto e8 = product(extents[ 8]); // {1,2,3,1,1} - auto e9 = product(extents[ 9]); // {4,2,3} - auto e10 = product(extents[10]); // {4,2,1,3} - auto e11 = product(extents[11]); // {4,2,1,3,1} - auto e12 = product(extents[12]); // {1,4,2,1,3,1} - auto e13 = product(extents[13]); // {1,4,1} - auto e14 = product(extents[14]); // {1,1,1,1} - auto e15 = product(extents[15]); // {1,4,1,1,1} - auto e16 = product(extents[16]); // {1,1,2,1,1,1} - auto e17 = product(extents[17]); // {1,1,2,3,1,1} - - BOOST_CHECK_EQUAL( e0 , 0 ); - BOOST_CHECK_EQUAL( e1 , 1 ); - BOOST_CHECK_EQUAL( e2 , 2 ); - BOOST_CHECK_EQUAL( e3 , 2 ); - BOOST_CHECK_EQUAL( e4 , 6 ); - BOOST_CHECK_EQUAL( e5 , 6 ); - BOOST_CHECK_EQUAL( e6 , 6 ); - BOOST_CHECK_EQUAL( e7 , 6 ); - BOOST_CHECK_EQUAL( e8 , 6 ); - BOOST_CHECK_EQUAL( e9 , 24 ); - BOOST_CHECK_EQUAL( e10, 24 ); - BOOST_CHECK_EQUAL( e11, 24 ); - BOOST_CHECK_EQUAL( e12, 24 ); - BOOST_CHECK_EQUAL( e13, 4 ); - BOOST_CHECK_EQUAL( e14, 1 ); - BOOST_CHECK_EQUAL( e15, 4 ); - BOOST_CHECK_EQUAL( e16, 2 ); - BOOST_CHECK_EQUAL( e17, 6 ); - - -} - -BOOST_AUTO_TEST_SUITE_END() diff --git a/test/tensor/test_extents_dynamic.cpp b/test/tensor/test_extents_dynamic.cpp new file mode 100644 index 000000000..fe4761b88 --- /dev/null +++ b/test/tensor/test_extents_dynamic.cpp @@ -0,0 +1,190 @@ +// +// Copyright (c) 2018 Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + +#include +#include + +BOOST_AUTO_TEST_SUITE ( test_extents_dynamic ) + + +struct fixture +{ + using extents = boost::numeric::ublas::extents<>; + +// static inline auto n = extents{}; + static inline auto n1 = extents{1}; + static inline auto n2 = extents{2}; + static inline auto n11 = extents{1,1}; + static inline auto n12 = extents{1,2}; + static inline auto n21 = extents{2,1}; + static inline auto n22 = extents{2,2}; + static inline auto n32 = extents{3,2}; + static inline auto n111 = extents{1,1,1}; + static inline auto n211 = extents{2,1,1}; + static inline auto n121 = extents{1,2,1}; + static inline auto n112 = extents{1,1,2}; + static inline auto n123 = extents{1,2,3}; + static inline auto n321 = extents{3,2,1}; + static inline auto n213 = extents{2,1,3}; + static inline auto n432 = extents{4,3,2}; +}; + + +BOOST_FIXTURE_TEST_CASE(test_extents_dynamic_empty, + fixture, + *boost::unit_test::label("dynamic_extents") *boost::unit_test::label("empty")) +{ + namespace ublas = boost::numeric::ublas; + +// BOOST_CHECK( ublas::empty(n )); + BOOST_CHECK(!ublas::empty(n1 )); + BOOST_CHECK(!ublas::empty(n2 )); + BOOST_CHECK(!ublas::empty(n11 )); + BOOST_CHECK(!ublas::empty(n12 )); + BOOST_CHECK(!ublas::empty(n21 )); + BOOST_CHECK(!ublas::empty(n22 )); + BOOST_CHECK(!ublas::empty(n32 )); + BOOST_CHECK(!ublas::empty(n111)); + BOOST_CHECK(!ublas::empty(n211)); + BOOST_CHECK(!ublas::empty(n121)); + BOOST_CHECK(!ublas::empty(n112)); + BOOST_CHECK(!ublas::empty(n123)); + BOOST_CHECK(!ublas::empty(n321)); + BOOST_CHECK(!ublas::empty(n213)); + BOOST_CHECK(!ublas::empty(n432)); + + BOOST_CHECK_THROW( extents({1,1,0}), std::invalid_argument); + BOOST_CHECK_THROW( extents({1,0}) , std::invalid_argument); + BOOST_CHECK_THROW( extents({0} ) , std::invalid_argument); + BOOST_CHECK_THROW( extents({0,1}) , std::invalid_argument); +} + + + +BOOST_FIXTURE_TEST_CASE(test_extents_dynamic_size, + fixture, + *boost::unit_test::label("dynamic_extents") *boost::unit_test::label("size")) +{ + namespace ublas = boost::numeric::ublas; + + +// BOOST_CHECK_EQUAL(ublas::size(n ),0); + BOOST_CHECK_EQUAL(ublas::size(n1 ),1); + BOOST_CHECK_EQUAL(ublas::size(n2 ),1); + BOOST_CHECK_EQUAL(ublas::size(n11 ),2); + BOOST_CHECK_EQUAL(ublas::size(n12 ),2); + BOOST_CHECK_EQUAL(ublas::size(n21 ),2); + BOOST_CHECK_EQUAL(ublas::size(n22 ),2); + BOOST_CHECK_EQUAL(ublas::size(n32 ),2); + BOOST_CHECK_EQUAL(ublas::size(n111),3); + BOOST_CHECK_EQUAL(ublas::size(n211),3); + BOOST_CHECK_EQUAL(ublas::size(n121),3); + BOOST_CHECK_EQUAL(ublas::size(n112),3); + BOOST_CHECK_EQUAL(ublas::size(n123),3); + BOOST_CHECK_EQUAL(ublas::size(n321),3); + BOOST_CHECK_EQUAL(ublas::size(n213),3); + BOOST_CHECK_EQUAL(ublas::size(n432),3); +} + + + +BOOST_FIXTURE_TEST_CASE(test_extents_dynamic_at_read, + fixture, + *boost::unit_test::label("dynamic_extents") *boost::unit_test::label("at_read")) +{ + BOOST_CHECK_EQUAL(n1 .at(0),1); + BOOST_CHECK_EQUAL(n2 .at(0),2); + + BOOST_CHECK_EQUAL(n11 .at(0),1); + BOOST_CHECK_EQUAL(n11 .at(1),1); + + BOOST_CHECK_EQUAL(n12 .at(0),1); + BOOST_CHECK_EQUAL(n12 .at(1),2); + + BOOST_CHECK_EQUAL(n21 .at(0),2); + BOOST_CHECK_EQUAL(n21 .at(1),1); + + BOOST_CHECK_EQUAL(n22 .at(0),2); + BOOST_CHECK_EQUAL(n22 .at(1),2); + + BOOST_CHECK_EQUAL(n32 .at(0),3); + BOOST_CHECK_EQUAL(n32 .at(1),2); + + BOOST_CHECK_EQUAL(n432.at(0),4); + 
BOOST_CHECK_EQUAL(n432.at(1),3); + BOOST_CHECK_EQUAL(n432.at(2),2); + + +// BOOST_CHECK_THROW( (void)n .at(0), std::out_of_range); + BOOST_CHECK_THROW( (void)n32.at(2), std::out_of_range); + BOOST_CHECK_THROW( (void)n32.at(5), std::out_of_range); +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_dynamic_at_write, + fixture, + *boost::unit_test::label("dynamic_extents") *boost::unit_test::label("at_write")) +{ + auto n3 = extents{1}; + n3 = extents{3}; + BOOST_CHECK_EQUAL(n3.at(0),3); + + auto n34 = extents{1,1}; + n34 = extents{3,4}; + BOOST_CHECK_EQUAL(n34.at(0),3); + BOOST_CHECK_EQUAL(n34.at(1),4); + + + auto n345 = extents{1,1,1}; + n345 = extents{3,4,5}; + BOOST_CHECK_EQUAL(n345.at(0),3); + BOOST_CHECK_EQUAL(n345.at(1),4); + BOOST_CHECK_EQUAL(n345.at(2),5); + + + auto n5432 = extents{1,1,1,1}; + n5432 = extents{5,4,3,2}; + BOOST_CHECK_EQUAL(n5432.at(0),5); + BOOST_CHECK_EQUAL(n5432.at(1),4); + BOOST_CHECK_EQUAL(n5432.at(2),3); + BOOST_CHECK_EQUAL(n5432.at(3),2); +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_dynamic_operator_access_read, + fixture, + *boost::unit_test::label("dynamic_extents") *boost::unit_test::label("operator_access_read")) +{ + BOOST_CHECK_EQUAL(n1 [0],1); + BOOST_CHECK_EQUAL(n2 [0],2); + + BOOST_CHECK_EQUAL(n11 [0],1); + BOOST_CHECK_EQUAL(n11 [1],1); + + BOOST_CHECK_EQUAL(n12 [0],1); + BOOST_CHECK_EQUAL(n12 [1],2); + + BOOST_CHECK_EQUAL(n21 [0],2); + BOOST_CHECK_EQUAL(n21 [1],1); + + BOOST_CHECK_EQUAL(n22 [0],2); + BOOST_CHECK_EQUAL(n22 [1],2); + + BOOST_CHECK_EQUAL(n32 [0],3); + BOOST_CHECK_EQUAL(n32 [1],2); + + BOOST_CHECK_EQUAL(n432[0],4); + BOOST_CHECK_EQUAL(n432[1],3); + BOOST_CHECK_EQUAL(n432[2],2); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/test/tensor/test_extents_dynamic_rank_static.cpp b/test/tensor/test_extents_dynamic_rank_static.cpp new file mode 100644 index 000000000..c7d4a6a9d --- /dev/null +++ b/test/tensor/test_extents_dynamic_rank_static.cpp @@ -0,0 +1,155 @@ +// +// Copyright (c) 2021 Cem Bassoy, cem.bassoy@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// + +#include +#include + +BOOST_AUTO_TEST_SUITE ( test_shape_dynamic_static_rank ) + + +struct fixture +{ + template + using shape_t = boost::numeric::ublas::extents; + +// static inline auto n = shape_t<0>{}; + static inline auto n1 = shape_t<1>{1}; + static inline auto n2 = shape_t<1>{2}; + static inline auto n11 = shape_t<2>{1,1}; + static inline auto n12 = shape_t<2>{1,2}; + static inline auto n21 = shape_t<2>{2,1}; + static inline auto n22 = shape_t<2>{2,2}; + static inline auto n32 = shape_t<2>{3,2}; + static inline auto n111 = shape_t<3>{1,1,1}; + static inline auto n211 = shape_t<3>{2,1,1}; + static inline auto n121 = shape_t<3>{1,2,1}; + static inline auto n112 = shape_t<3>{1,1,2}; + static inline auto n123 = shape_t<3>{1,2,3}; + static inline auto n321 = shape_t<3>{3,2,1}; + static inline auto n213 = shape_t<3>{2,1,3}; + static inline auto n432 = shape_t<3>{4,3,2}; +}; + + +BOOST_FIXTURE_TEST_CASE(test_extents_dynamic_rank_static_empty, + fixture, + *boost::unit_test::label("dynamic_extents_rank_static") *boost::unit_test::label("empty")) +{ + namespace ublas = boost::numeric::ublas; +// BOOST_CHECK( ublas::empty(n )); + BOOST_CHECK(!ublas::empty(n1 )); + BOOST_CHECK(!ublas::empty(n2 )); + BOOST_CHECK(!ublas::empty(n11 )); + BOOST_CHECK(!ublas::empty(n12 )); + BOOST_CHECK(!ublas::empty(n21 )); + BOOST_CHECK(!ublas::empty(n22 )); + BOOST_CHECK(!ublas::empty(n32 )); + BOOST_CHECK(!ublas::empty(n111)); + BOOST_CHECK(!ublas::empty(n211)); + BOOST_CHECK(!ublas::empty(n121)); + BOOST_CHECK(!ublas::empty(n112)); + BOOST_CHECK(!ublas::empty(n123)); + BOOST_CHECK(!ublas::empty(n321)); + BOOST_CHECK(!ublas::empty(n213)); + BOOST_CHECK(!ublas::empty(n432)); + + BOOST_CHECK_THROW( shape_t<3>({1,1,0}), std::invalid_argument); + BOOST_CHECK_THROW( shape_t<2>({1,0}), std::invalid_argument); + BOOST_CHECK_THROW( shape_t<1>({0} ), std::invalid_argument); + BOOST_CHECK_THROW( shape_t<2>({0,1}), std::invalid_argument); +} + + + +BOOST_FIXTURE_TEST_CASE(test_extents_dynamic_rank_static_size, + fixture, + *boost::unit_test::label("dynamic_extents_rank_static") *boost::unit_test::label("size")) +{ + namespace ublas = boost::numeric::ublas; + +// BOOST_CHECK_EQUAL(ublas::size(n ),0); + BOOST_CHECK_EQUAL(ublas::size(n1 ),1); + BOOST_CHECK_EQUAL(ublas::size(n2 ),1); + BOOST_CHECK_EQUAL(ublas::size(n11 ),2); + BOOST_CHECK_EQUAL(ublas::size(n12 ),2); + BOOST_CHECK_EQUAL(ublas::size(n21 ),2); + BOOST_CHECK_EQUAL(ublas::size(n22 ),2); + BOOST_CHECK_EQUAL(ublas::size(n32 ),2); + BOOST_CHECK_EQUAL(ublas::size(n111),3); + BOOST_CHECK_EQUAL(ublas::size(n211),3); + BOOST_CHECK_EQUAL(ublas::size(n121),3); + BOOST_CHECK_EQUAL(ublas::size(n112),3); + BOOST_CHECK_EQUAL(ublas::size(n123),3); + BOOST_CHECK_EQUAL(ublas::size(n321),3); + BOOST_CHECK_EQUAL(ublas::size(n213),3); + BOOST_CHECK_EQUAL(ublas::size(n432),3); +} + + + +BOOST_FIXTURE_TEST_CASE(test_extents_dynamic_rank_static_at_read, + fixture, + *boost::unit_test::label("dynamic_extents_rank_static") *boost::unit_test::label("at_read")) +{ + BOOST_CHECK_EQUAL(n1 .at(0),1); + BOOST_CHECK_EQUAL(n2 .at(0),2); + + BOOST_CHECK_EQUAL(n11 .at(0),1); + BOOST_CHECK_EQUAL(n11 .at(1),1); + + BOOST_CHECK_EQUAL(n12 .at(0),1); + BOOST_CHECK_EQUAL(n12 .at(1),2); + + BOOST_CHECK_EQUAL(n21 .at(0),2); + BOOST_CHECK_EQUAL(n21 .at(1),1); + + BOOST_CHECK_EQUAL(n22 .at(0),2); + BOOST_CHECK_EQUAL(n22 .at(1),2); + + BOOST_CHECK_EQUAL(n32 .at(0),3); + BOOST_CHECK_EQUAL(n32 .at(1),2); + + 
BOOST_CHECK_EQUAL(n432.at(0),4); + BOOST_CHECK_EQUAL(n432.at(1),3); + BOOST_CHECK_EQUAL(n432.at(2),2); + + +// BOOST_CHECK_THROW( (void)n .at(0), std::out_of_range); + BOOST_CHECK_THROW( (void)n32.at(2), std::out_of_range); + BOOST_CHECK_THROW( (void)n32.at(5), std::out_of_range); +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_dynamic_rank_static_operator_access_read, + fixture, + *boost::unit_test::label("dynamic_extents_rank_static") *boost::unit_test::label("operator_access_read")) +{ + BOOST_CHECK_EQUAL(n1 [0],1); + BOOST_CHECK_EQUAL(n2 [0],2); + + BOOST_CHECK_EQUAL(n11 [0],1); + BOOST_CHECK_EQUAL(n11 [1],1); + + BOOST_CHECK_EQUAL(n12 [0],1); + BOOST_CHECK_EQUAL(n12 [1],2); + + BOOST_CHECK_EQUAL(n21 [0],2); + BOOST_CHECK_EQUAL(n21 [1],1); + + BOOST_CHECK_EQUAL(n22 [0],2); + BOOST_CHECK_EQUAL(n22 [1],2); + + BOOST_CHECK_EQUAL(n32 [0],3); + BOOST_CHECK_EQUAL(n32 [1],2); + + BOOST_CHECK_EQUAL(n432[0],4); + BOOST_CHECK_EQUAL(n432[1],3); + BOOST_CHECK_EQUAL(n432[2],2); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/test/tensor/test_extents_functions.cpp b/test/tensor/test_extents_functions.cpp new file mode 100644 index 000000000..868afd26e --- /dev/null +++ b/test/tensor/test_extents_functions.cpp @@ -0,0 +1,634 @@ +// +// Copyright (c) 2020, Cem Bassoy, cem.bassoy@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + + +#include +#include + +#include +#include + + + +BOOST_AUTO_TEST_SUITE(test_shape_functions) + +struct fixture_extents_dynamic_rank +{ + using shape_t = boost::numeric::ublas::extents<>; + + static inline auto n = shape_t{}; + static inline auto n1 = shape_t{1}; + static inline auto n2 = shape_t{2}; + static inline auto n11 = shape_t{1,1}; + static inline auto n12 = shape_t{1,2}; + static inline auto n21 = shape_t{2,1}; + static inline auto n22 = shape_t{2,2}; + static inline auto n32 = shape_t{3,2}; + static inline auto n111 = shape_t{1,1,1}; + static inline auto n211 = shape_t{2,1,1}; + static inline auto n121 = shape_t{1,2,1}; + static inline auto n112 = shape_t{1,1,2}; + static inline auto n123 = shape_t{1,2,3}; + static inline auto n321 = shape_t{3,2,1}; + static inline auto n213 = shape_t{2,1,3}; + static inline auto n432 = shape_t{4,3,2}; +}; + +struct fixture_extents_static_rank +{ + template + using extents_static_rank = boost::numeric::ublas::extents; + + static constexpr inline auto n = extents_static_rank<0>{}; + static constexpr inline auto n1 = extents_static_rank<1>{1}; + static constexpr inline auto n2 = extents_static_rank<1>{2}; + static constexpr inline auto n11 = extents_static_rank<2>{{1,1}}; + static constexpr inline auto n12 = extents_static_rank<2>{{1,2}}; + static constexpr inline auto n21 = extents_static_rank<2>{{2,1}}; + static constexpr inline auto n22 = extents_static_rank<2>{{2,2}}; + static constexpr inline auto n32 = extents_static_rank<2>{{3,2}}; + static constexpr inline auto n111 = extents_static_rank<3>{{1,1,1}}; + static constexpr inline auto n211 = extents_static_rank<3>{{2,1,1}}; + static constexpr inline auto n121 = extents_static_rank<3>{{1,2,1}}; + static constexpr inline auto n112 = extents_static_rank<3>{{1,1,2}}; + static constexpr inline auto n123 = extents_static_rank<3>{{1,2,3}}; + static constexpr inline auto n321 = extents_static_rank<3>{{3,2,1}}; + static constexpr inline auto n213 = 
extents_static_rank<3>{{2,1,3}}; + static constexpr inline auto n432 = extents_static_rank<3>{{4,3,2}}; + + static constexpr inline auto tuple = std::make_tuple( n,n1,n2,n11,n12,n21,n22,n32,n111,n211,n121,n112,n123,n321,n213,n432 ); + +}; + + + +struct fixture_extents_static +{ + template + using extents_static = boost::numeric::ublas::extents; + + static inline auto n = extents_static<> {}; + static inline auto n1 = extents_static<1> {}; + static inline auto n2 = extents_static<2> {}; + static inline auto n11 = extents_static<1,1> {}; + static inline auto n12 = extents_static<1,2> {}; + static inline auto n21 = extents_static<2,1> {}; + static inline auto n22 = extents_static<2,2> {}; + static inline auto n32 = extents_static<3,2> {}; + static inline auto n111 = extents_static<1,1,1> {}; + static inline auto n211 = extents_static<2,1,1> {}; + static inline auto n121 = extents_static<1,2,1> {}; + static inline auto n112 = extents_static<1,1,2> {}; + static inline auto n123 = extents_static<1,2,3> {}; + static inline auto n321 = extents_static<3,2,1> {}; + static inline auto n213 = extents_static<2,1,3> {}; + static inline auto n432 = extents_static<4,3,2> {}; + + + +}; + + + + +BOOST_FIXTURE_TEST_CASE(test_extents_dynamic_rank_is_scalar, + fixture_extents_dynamic_rank, + *boost::unit_test::label("extents_dynamic_rank") + *boost::unit_test::label("is_scalar")) +{ + + namespace ub = boost::numeric::ublas; + BOOST_CHECK ( !ub::is_scalar( n )); + BOOST_CHECK ( ub::is_scalar( n1 )); + BOOST_CHECK ( !ub::is_scalar( n2 )); + BOOST_CHECK ( ub::is_scalar( n11 )); + BOOST_CHECK ( !ub::is_scalar( n12 )); + BOOST_CHECK ( !ub::is_scalar( n21 )); + BOOST_CHECK ( !ub::is_scalar( n22 )); + BOOST_CHECK ( !ub::is_scalar( n32 )); + BOOST_CHECK ( ub::is_scalar( n111 )); + BOOST_CHECK ( !ub::is_scalar( n211 )); + BOOST_CHECK ( !ub::is_scalar( n121 )); + BOOST_CHECK ( !ub::is_scalar( n112 )); + BOOST_CHECK ( !ub::is_scalar( n123 )); + BOOST_CHECK ( !ub::is_scalar( n321 )); + BOOST_CHECK ( !ub::is_scalar( n213 )); + BOOST_CHECK ( !ub::is_scalar( n432 )); +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_static_rank_is_scalar, + fixture_extents_static_rank, + *boost::unit_test::label("extents_static_rank") + *boost::unit_test::label("is_scalar")) +{ + + namespace ub = boost::numeric::ublas; + BOOST_CHECK ( !ub::is_scalar( n )); + BOOST_CHECK ( ub::is_scalar( n1 )); + BOOST_CHECK ( !ub::is_scalar( n2 )); + BOOST_CHECK ( ub::is_scalar( n11 )); + BOOST_CHECK ( !ub::is_scalar( n12 )); + BOOST_CHECK ( !ub::is_scalar( n21 )); + BOOST_CHECK ( !ub::is_scalar( n22 )); + BOOST_CHECK ( !ub::is_scalar( n32 )); + BOOST_CHECK ( ub::is_scalar( n111 )); + BOOST_CHECK ( !ub::is_scalar( n211 )); + BOOST_CHECK ( !ub::is_scalar( n121 )); + BOOST_CHECK ( !ub::is_scalar( n112 )); + BOOST_CHECK ( !ub::is_scalar( n123 )); + BOOST_CHECK ( !ub::is_scalar( n321 )); + BOOST_CHECK ( !ub::is_scalar( n213 )); + BOOST_CHECK ( !ub::is_scalar( n432 )); + +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_static_is_scalar, + fixture_extents_static, + *boost::unit_test::label("extents_static") + *boost::unit_test::label("is_scalar")) +{ + + namespace ub = boost::numeric::ublas; + BOOST_CHECK ( !ub::is_scalar( n )); +//FIXME: BOOST_CHECK ( ub::is_scalar( n1 )); + BOOST_CHECK ( !ub::is_scalar( n2 )); + BOOST_CHECK ( ub::is_scalar( n11 )); + BOOST_CHECK ( !ub::is_scalar( n12 )); + BOOST_CHECK ( !ub::is_scalar( n21 )); + BOOST_CHECK ( !ub::is_scalar( n22 )); + BOOST_CHECK ( !ub::is_scalar( n32 )); + BOOST_CHECK ( ub::is_scalar( n111 )); + BOOST_CHECK ( 
!ub::is_scalar( n211 )); + BOOST_CHECK ( !ub::is_scalar( n121 )); + BOOST_CHECK ( !ub::is_scalar( n112 )); + BOOST_CHECK ( !ub::is_scalar( n123 )); + BOOST_CHECK ( !ub::is_scalar( n321 )); + BOOST_CHECK ( !ub::is_scalar( n213 )); + BOOST_CHECK ( !ub::is_scalar( n432 )); + +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_dynamic_rank_is_vector, + fixture_extents_dynamic_rank, + *boost::unit_test::label("extents_dynamic_rank") + *boost::unit_test::label("is_vector")) +{ + + namespace ub = boost::numeric::ublas; + BOOST_CHECK ( !ub::is_vector( n )); + BOOST_CHECK ( ub::is_vector( n1 )); + BOOST_CHECK ( ub::is_vector( n2 )); + BOOST_CHECK ( ub::is_vector( n11 )); + BOOST_CHECK ( ub::is_vector( n12 )); + BOOST_CHECK ( ub::is_vector( n21 )); + BOOST_CHECK ( !ub::is_vector( n22 )); + BOOST_CHECK ( !ub::is_vector( n32 )); + BOOST_CHECK ( ub::is_vector( n111 )); + BOOST_CHECK ( ub::is_vector( n211 )); + BOOST_CHECK ( ub::is_vector( n121 )); + BOOST_CHECK ( !ub::is_vector( n112 )); + BOOST_CHECK ( !ub::is_vector( n123 )); + BOOST_CHECK ( !ub::is_vector( n321 )); + BOOST_CHECK ( !ub::is_vector( n213 )); + BOOST_CHECK ( !ub::is_vector( n432 )); +} + +BOOST_FIXTURE_TEST_CASE(test_extents_static_rank_is_vector, + fixture_extents_static_rank, + *boost::unit_test::label("extents_static_rank") + *boost::unit_test::label("is_vector")) +{ + + namespace ub = boost::numeric::ublas; + BOOST_CHECK ( !ub::is_vector( n )); + BOOST_CHECK ( ub::is_vector( n1 )); + BOOST_CHECK ( ub::is_vector( n2 )); + BOOST_CHECK ( ub::is_vector( n11 )); + BOOST_CHECK ( ub::is_vector( n12 )); + BOOST_CHECK ( ub::is_vector( n21 )); + BOOST_CHECK ( !ub::is_vector( n22 )); + BOOST_CHECK ( !ub::is_vector( n32 )); + BOOST_CHECK ( ub::is_vector( n111 )); + BOOST_CHECK ( ub::is_vector( n211 )); + BOOST_CHECK ( ub::is_vector( n121 )); + BOOST_CHECK ( !ub::is_vector( n112 )); + BOOST_CHECK ( !ub::is_vector( n123 )); + BOOST_CHECK ( !ub::is_vector( n321 )); + BOOST_CHECK ( !ub::is_vector( n213 )); + BOOST_CHECK ( !ub::is_vector( n432 )); +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_static_is_vector, + fixture_extents_static, + *boost::unit_test::label("extents_static") + *boost::unit_test::label("is_vector")) +{ + + namespace ub = boost::numeric::ublas; + BOOST_CHECK ( !ub::is_vector( n1 )); +//FIXME: BOOST_CHECK ( ub::is_vector( n2 )); + BOOST_CHECK ( ub::is_vector( n11 )); + BOOST_CHECK ( ub::is_vector( n12 )); + BOOST_CHECK ( ub::is_vector( n21 )); + BOOST_CHECK ( !ub::is_vector( n22 )); + BOOST_CHECK ( !ub::is_vector( n32 )); + BOOST_CHECK ( ub::is_vector( n111 )); + BOOST_CHECK ( ub::is_vector( n211 )); + BOOST_CHECK ( ub::is_vector( n121 )); + BOOST_CHECK ( !ub::is_vector( n112 )); + BOOST_CHECK ( !ub::is_vector( n123 )); + BOOST_CHECK ( !ub::is_vector( n321 )); + BOOST_CHECK ( !ub::is_vector( n213 )); + BOOST_CHECK ( !ub::is_vector( n432 )); +} + +BOOST_FIXTURE_TEST_CASE(test_extents_dynamic_rank_is_matrix, + fixture_extents_dynamic_rank, + *boost::unit_test::label("extents_dynamic_rank") + *boost::unit_test::label("is_matrix")) +{ + + namespace ub = boost::numeric::ublas; + BOOST_CHECK ( !ub::is_matrix( n )); + BOOST_CHECK ( ub::is_matrix( n1 )); + BOOST_CHECK ( ub::is_matrix( n2 )); + BOOST_CHECK ( ub::is_matrix( n11 )); + BOOST_CHECK ( ub::is_matrix( n12 )); + BOOST_CHECK ( ub::is_matrix( n21 )); + BOOST_CHECK ( ub::is_matrix( n22 )); + BOOST_CHECK ( ub::is_matrix( n32 )); + BOOST_CHECK ( ub::is_matrix( n111 )); + BOOST_CHECK ( ub::is_matrix( n211 )); + BOOST_CHECK ( ub::is_matrix( n121 )); + BOOST_CHECK ( !ub::is_matrix( n112 )); + 
BOOST_CHECK ( !ub::is_matrix( n123 )); + BOOST_CHECK ( ub::is_matrix( n321 )); + BOOST_CHECK ( !ub::is_matrix( n213 )); + BOOST_CHECK ( !ub::is_matrix( n432 )); +} + +BOOST_FIXTURE_TEST_CASE(test_extents_static_rank_is_matrix, + fixture_extents_static_rank, + *boost::unit_test::label("extents_static_rank") + *boost::unit_test::label("is_matrix")) +{ + + namespace ub = boost::numeric::ublas; + BOOST_CHECK ( !ub::is_matrix( n )); + BOOST_CHECK ( ub::is_matrix( n1 )); + BOOST_CHECK ( ub::is_matrix( n2 )); + BOOST_CHECK ( ub::is_matrix( n11 )); + BOOST_CHECK ( ub::is_matrix( n12 )); + BOOST_CHECK ( ub::is_matrix( n21 )); + BOOST_CHECK ( ub::is_matrix( n22 )); + BOOST_CHECK ( ub::is_matrix( n32 )); + BOOST_CHECK ( ub::is_matrix( n111 )); + BOOST_CHECK ( ub::is_matrix( n211 )); + BOOST_CHECK ( ub::is_matrix( n121 )); + BOOST_CHECK ( !ub::is_matrix( n112 )); + BOOST_CHECK ( !ub::is_matrix( n123 )); + BOOST_CHECK ( ub::is_matrix( n321 )); + BOOST_CHECK ( !ub::is_matrix( n213 )); + BOOST_CHECK ( !ub::is_matrix( n432 )); +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_static_is_matrix, + fixture_extents_static, + *boost::unit_test::label("extents_static") + *boost::unit_test::label("is_matrix")) +{ + + namespace ub = boost::numeric::ublas; + BOOST_CHECK ( !ub::is_matrix( n )); +//FIXME: BOOST_CHECK ( !ub::is_matrix( n1 )); + BOOST_CHECK ( !ub::is_matrix( n2 )); + BOOST_CHECK ( ub::is_matrix( n11 )); + BOOST_CHECK ( ub::is_matrix( n12 )); + BOOST_CHECK ( ub::is_matrix( n21 )); + BOOST_CHECK ( ub::is_matrix( n22 )); + BOOST_CHECK ( ub::is_matrix( n32 )); + BOOST_CHECK ( ub::is_matrix( n111 )); + BOOST_CHECK ( ub::is_matrix( n211 )); + BOOST_CHECK ( ub::is_matrix( n121 )); + BOOST_CHECK ( !ub::is_matrix( n112 )); + BOOST_CHECK ( !ub::is_matrix( n123 )); + BOOST_CHECK ( ub::is_matrix( n321 )); + BOOST_CHECK ( !ub::is_matrix( n213 )); + BOOST_CHECK ( !ub::is_matrix( n432 )); +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_dynamic_rank_is_tensor, + fixture_extents_dynamic_rank, + *boost::unit_test::label("extents_dynamic_rank") + *boost::unit_test::label("is_tensor")) +{ + + namespace ub = boost::numeric::ublas; + BOOST_CHECK ( !ub::is_tensor( n )); + BOOST_CHECK ( !ub::is_tensor( n1 )); + BOOST_CHECK ( !ub::is_tensor( n2 )); + BOOST_CHECK ( !ub::is_tensor( n11 )); + BOOST_CHECK ( !ub::is_tensor( n12 )); + BOOST_CHECK ( !ub::is_tensor( n21 )); + BOOST_CHECK ( !ub::is_tensor( n22 )); + BOOST_CHECK ( !ub::is_tensor( n32 )); + BOOST_CHECK ( !ub::is_tensor( n111 )); + BOOST_CHECK ( !ub::is_tensor( n211 )); + BOOST_CHECK ( !ub::is_tensor( n121 )); + BOOST_CHECK ( ub::is_tensor( n112 )); + BOOST_CHECK ( ub::is_tensor( n123 )); + BOOST_CHECK ( !ub::is_tensor( n321 )); + BOOST_CHECK ( ub::is_tensor( n213 )); + BOOST_CHECK ( ub::is_tensor( n432 )); +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_static_rank_is_tensor, + fixture_extents_static_rank, + *boost::unit_test::label("extents_static_rank") + *boost::unit_test::label("is_tensor")) +{ + + namespace ub = boost::numeric::ublas; + BOOST_CHECK ( !ub::is_tensor( n )); + BOOST_CHECK ( !ub::is_tensor( n1 )); + BOOST_CHECK ( !ub::is_tensor( n2 )); + BOOST_CHECK ( !ub::is_tensor( n11 )); + BOOST_CHECK ( !ub::is_tensor( n12 )); + BOOST_CHECK ( !ub::is_tensor( n21 )); + BOOST_CHECK ( !ub::is_tensor( n22 )); + BOOST_CHECK ( !ub::is_tensor( n32 )); + BOOST_CHECK ( !ub::is_tensor( n111 )); + BOOST_CHECK ( !ub::is_tensor( n211 )); + BOOST_CHECK ( !ub::is_tensor( n121 )); + BOOST_CHECK ( ub::is_tensor( n112 )); + BOOST_CHECK ( ub::is_tensor( n123 )); + BOOST_CHECK ( !ub::is_tensor( 
n321 )); + BOOST_CHECK ( ub::is_tensor( n213 )); + BOOST_CHECK ( ub::is_tensor( n432 )); +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_static_is_tensor, + fixture_extents_static, + *boost::unit_test::label("extents_static") + *boost::unit_test::label("is_tensor")) +{ + + namespace ub = boost::numeric::ublas; + BOOST_CHECK ( !ub::is_tensor( n )); +//FIXME: BOOST_CHECK ( !ub::is_tensor( n1 )); + BOOST_CHECK ( !ub::is_tensor( n2 )); + BOOST_CHECK ( !ub::is_tensor( n11 )); + BOOST_CHECK ( !ub::is_tensor( n12 )); + BOOST_CHECK ( !ub::is_tensor( n21 )); + BOOST_CHECK ( !ub::is_tensor( n22 )); + BOOST_CHECK ( !ub::is_tensor( n32 )); + BOOST_CHECK ( !ub::is_tensor( n111 )); + BOOST_CHECK ( !ub::is_tensor( n211 )); + BOOST_CHECK ( !ub::is_tensor( n121 )); + BOOST_CHECK ( ub::is_tensor( n112 )); + BOOST_CHECK ( ub::is_tensor( n123 )); + BOOST_CHECK ( !ub::is_tensor( n321 )); + BOOST_CHECK ( ub::is_tensor( n213 )); + BOOST_CHECK ( ub::is_tensor( n432 )); +} + +BOOST_FIXTURE_TEST_CASE(test_extents_dynamic_rank_is_valid, + fixture_extents_dynamic_rank, + *boost::unit_test::label("extents_dynamic_rank") + *boost::unit_test::label("is_valid")) +{ + + namespace ub = boost::numeric::ublas; + BOOST_CHECK ( ub::is_valid( n1 )); + BOOST_CHECK ( ub::is_valid( n2 )); + BOOST_CHECK ( ub::is_valid( n11 )); + BOOST_CHECK ( ub::is_valid( n12 )); + BOOST_CHECK ( ub::is_valid( n21 )); + BOOST_CHECK ( ub::is_valid( n22 )); + BOOST_CHECK ( ub::is_valid( n32 )); + BOOST_CHECK ( ub::is_valid( n111 )); + BOOST_CHECK ( ub::is_valid( n211 )); + BOOST_CHECK ( ub::is_valid( n121 )); + BOOST_CHECK ( ub::is_valid( n112 )); + BOOST_CHECK ( ub::is_valid( n123 )); + BOOST_CHECK ( ub::is_valid( n321 )); + BOOST_CHECK ( ub::is_valid( n213 )); + BOOST_CHECK ( ub::is_valid( n432 )); +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_static_rank_is_valid, + fixture_extents_static_rank, + *boost::unit_test::label("extents_static_rank") + *boost::unit_test::label("is_valid")) +{ + + namespace ub = boost::numeric::ublas; + BOOST_CHECK ( ub::is_valid( n )); + BOOST_CHECK ( ub::is_valid( n1 )); + BOOST_CHECK ( ub::is_valid( n2 )); + BOOST_CHECK ( ub::is_valid( n11 )); + BOOST_CHECK ( ub::is_valid( n12 )); + BOOST_CHECK ( ub::is_valid( n21 )); + BOOST_CHECK ( ub::is_valid( n22 )); + BOOST_CHECK ( ub::is_valid( n32 )); + BOOST_CHECK ( ub::is_valid( n111 )); + BOOST_CHECK ( ub::is_valid( n211 )); + BOOST_CHECK ( ub::is_valid( n121 )); + BOOST_CHECK ( ub::is_valid( n112 )); + BOOST_CHECK ( ub::is_valid( n123 )); + BOOST_CHECK ( ub::is_valid( n321 )); + BOOST_CHECK ( ub::is_valid( n213 )); + BOOST_CHECK ( ub::is_valid( n432 )); +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_static_is_valid, + fixture_extents_static, + *boost::unit_test::label("extents_static") + *boost::unit_test::label("is_valid")) +{ + + namespace ub = boost::numeric::ublas; + BOOST_CHECK ( ub::is_valid( n )); +//FIXME: BOOST_CHECK ( ub::is_valid( n1 )); +//FIXME: BOOST_CHECK ( ub::is_valid( n2 )); + BOOST_CHECK ( ub::is_valid( n11 )); + BOOST_CHECK ( ub::is_valid( n12 )); + BOOST_CHECK ( ub::is_valid( n21 )); + BOOST_CHECK ( ub::is_valid( n22 )); + BOOST_CHECK ( ub::is_valid( n32 )); + BOOST_CHECK ( ub::is_valid( n111 )); + BOOST_CHECK ( ub::is_valid( n211 )); + BOOST_CHECK ( ub::is_valid( n121 )); + BOOST_CHECK ( ub::is_valid( n112 )); + BOOST_CHECK ( ub::is_valid( n123 )); + BOOST_CHECK ( ub::is_valid( n321 )); + BOOST_CHECK ( ub::is_valid( n213 )); + BOOST_CHECK ( ub::is_valid( n432 )); +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_dynamic_rank_product, + 
fixture_extents_dynamic_rank, + *boost::unit_test::label("extents_dynamic_rank") + *boost::unit_test::label("product")) +{ + + namespace ub = boost::numeric::ublas; + BOOST_CHECK_EQUAL ( ub::product( n ), 0U); + BOOST_CHECK_EQUAL ( ub::product( n1 ), 1U); + BOOST_CHECK_EQUAL ( ub::product( n2 ), 2U); + BOOST_CHECK_EQUAL ( ub::product( n11 ), 1U); + BOOST_CHECK_EQUAL ( ub::product( n12 ), 2U); + BOOST_CHECK_EQUAL ( ub::product( n21 ), 2U); + BOOST_CHECK_EQUAL ( ub::product( n22 ), 4U); + BOOST_CHECK_EQUAL ( ub::product( n32 ), 6U); + BOOST_CHECK_EQUAL ( ub::product( n111 ), 1U); + BOOST_CHECK_EQUAL ( ub::product( n211 ), 2U); + BOOST_CHECK_EQUAL ( ub::product( n121 ), 2U); + BOOST_CHECK_EQUAL ( ub::product( n112 ), 2U); + BOOST_CHECK_EQUAL ( ub::product( n123 ), 6U); + BOOST_CHECK_EQUAL ( ub::product( n321 ), 6U); + BOOST_CHECK_EQUAL ( ub::product( n213 ), 6U); + BOOST_CHECK_EQUAL ( ub::product( n432 ),24U); +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_static_rank_product, + fixture_extents_static_rank, + *boost::unit_test::label("extents_static_rank") + *boost::unit_test::label("product")) +{ + + namespace ub = boost::numeric::ublas; + BOOST_CHECK_EQUAL ( ub::product( n ), 0U); + BOOST_CHECK_EQUAL ( ub::product( n1 ), 1U); + BOOST_CHECK_EQUAL ( ub::product( n2 ), 2U); + BOOST_CHECK_EQUAL ( ub::product( n11 ), 1U); + BOOST_CHECK_EQUAL ( ub::product( n12 ), 2U); + BOOST_CHECK_EQUAL ( ub::product( n21 ), 2U); + BOOST_CHECK_EQUAL ( ub::product( n22 ), 4U); + BOOST_CHECK_EQUAL ( ub::product( n32 ), 6U); + BOOST_CHECK_EQUAL ( ub::product( n111 ), 1U); + BOOST_CHECK_EQUAL ( ub::product( n211 ), 2U); + BOOST_CHECK_EQUAL ( ub::product( n121 ), 2U); + BOOST_CHECK_EQUAL ( ub::product( n112 ), 2U); + BOOST_CHECK_EQUAL ( ub::product( n123 ), 6U); + BOOST_CHECK_EQUAL ( ub::product( n321 ), 6U); + BOOST_CHECK_EQUAL ( ub::product( n213 ), 6U); + BOOST_CHECK_EQUAL ( ub::product( n432 ),24U); +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_static_product, + fixture_extents_static, + *boost::unit_test::label("extents_static") + *boost::unit_test::label("product")) +{ + + namespace ub = boost::numeric::ublas; + BOOST_CHECK_EQUAL ( ub::product( n ), 0U); +//FIXME: BOOST_CHECK_EQUAL ( ub::product( n1 ), 1U); +//FIXME: BOOST_CHECK_EQUAL ( ub::product( n2 ), 2U); + BOOST_CHECK_EQUAL ( ub::product( n11 ), 1U); + BOOST_CHECK_EQUAL ( ub::product( n12 ), 2U); + BOOST_CHECK_EQUAL ( ub::product( n21 ), 2U); + BOOST_CHECK_EQUAL ( ub::product( n22 ), 4U); + BOOST_CHECK_EQUAL ( ub::product( n32 ), 6U); + BOOST_CHECK_EQUAL ( ub::product( n111 ), 1U); + BOOST_CHECK_EQUAL ( ub::product( n211 ), 2U); + BOOST_CHECK_EQUAL ( ub::product( n121 ), 2U); + BOOST_CHECK_EQUAL ( ub::product( n112 ), 2U); + BOOST_CHECK_EQUAL ( ub::product( n123 ), 6U); + BOOST_CHECK_EQUAL ( ub::product( n321 ), 6U); + BOOST_CHECK_EQUAL ( ub::product( n213 ), 6U); + BOOST_CHECK_EQUAL ( ub::product( n432 ),24U); +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_dynamic_rank_equal, + fixture_extents_dynamic_rank, + *boost::unit_test::label("extents_dynamic_rank") + *boost::unit_test::label("equal")) +{ + BOOST_CHECK ( n == n ); + BOOST_CHECK ( n1 == n1 ); + BOOST_CHECK ( n2 == n2 ); + BOOST_CHECK ( n11 == n11 ); + BOOST_CHECK ( n12 == n12 ); + BOOST_CHECK ( n21 == n21 ); + BOOST_CHECK ( n22 == n22 ); + BOOST_CHECK ( n32 == n32 ); +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_static_rank_equal, + fixture_extents_static_rank, + *boost::unit_test::label("extents_static_rank") + *boost::unit_test::label("equal")) +{ + BOOST_CHECK ( n == n ); + BOOST_CHECK ( n1 == n1 ); 
+ BOOST_CHECK ( n2 == n2 ); + BOOST_CHECK ( n11 == n11 ); + BOOST_CHECK ( n12 == n12 ); + BOOST_CHECK ( n21 == n21 ); + BOOST_CHECK ( n22 == n22 ); + BOOST_CHECK ( n32 == n32 ); +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_static_equal, + fixture_extents_static, + *boost::unit_test::label("extents_static") + *boost::unit_test::label("equal")) +{ + BOOST_CHECK ( n == n ); + BOOST_CHECK ( n1 == n1 ); + BOOST_CHECK ( n2 == n2 ); + BOOST_CHECK ( n11 == n11 ); + BOOST_CHECK ( n12 == n12 ); + BOOST_CHECK ( n21 == n21 ); + BOOST_CHECK ( n22 == n22 ); + BOOST_CHECK ( n32 == n32 ); +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_dynamic_rank_not_equal, + fixture_extents_dynamic_rank, + *boost::unit_test::label("extents_dynamic_rank") + *boost::unit_test::label("not_equal")) +{ + BOOST_CHECK ( ! (n != n ) ); + BOOST_CHECK ( ! (n1 != n1) ); + BOOST_CHECK ( ! (n2 != n2) ); + BOOST_CHECK ( ! (n11 != n11) ); + BOOST_CHECK ( ! (n12 != n12) ); + BOOST_CHECK ( ! (n21 != n21) ); + BOOST_CHECK ( ! (n22 != n22) ); + BOOST_CHECK ( ! (n32 != n32) ); + BOOST_CHECK ( (n2 != n1) ); + BOOST_CHECK ( (n11 != n12) ); + BOOST_CHECK ( (n12 != n21) ); + BOOST_CHECK ( (n21 != n22) ); + BOOST_CHECK ( (n22 != n32) ); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/test/tensor/test_fixed_rank_expression_evaluation.cpp b/test/tensor/test_fixed_rank_expression_evaluation.cpp index 97aba6aec..a1e06885b 100644 --- a/test/tensor/test_fixed_rank_expression_evaluation.cpp +++ b/test/tensor/test_fixed_rank_expression_evaluation.cpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at @@ -12,150 +12,150 @@ -#include -#include "utility.hpp" +#include +#include #include +#include "utility.hpp" + +#include #include +#include -BOOST_AUTO_TEST_SUITE(test_fixed_rank_tensor_expression); +BOOST_AUTO_TEST_SUITE(test_tensor_static_rank_expression) using test_types = zip>::with_t; + + struct fixture { - template - using extents_type = boost::numeric::ublas::extents; - - std::tuple< - extents_type<0>, // 0 - extents_type<2>, // 1 - extents_type<2>, // 2 - extents_type<2>, // 3 - extents_type<2>, // 4 - extents_type<3>, // 5 - extents_type<3>, // 6 - extents_type<3>, // 7 - extents_type<3>, // 8 - extents_type<4> // 9 - > extents = { - extents_type<0>{}, - extents_type<2>{1,1}, - extents_type<2>{1,2}, - extents_type<2>{2,1}, - extents_type<2>{2,3}, - extents_type<3>{2,3,1}, - extents_type<3>{4,1,3}, - extents_type<3>{1,2,3}, - extents_type<3>{4,2,3}, - extents_type<4>{4,2,3,5} - }; + template + using extents_t = boost::numeric::ublas::extents; + + static constexpr auto extents = + std::make_tuple( +// extents_t<0> {}, + extents_t<2> {1,1}, + extents_t<2> {1,2}, + extents_t<2> {2,1}, + extents_t<2> {2,3}, + extents_t<3> {2,3,1}, + extents_t<3> {4,1,3}, + extents_t<3> {1,2,3}, + extents_t<3> {4,2,3}, + extents_t<4>{4,2,3,5} ); }; -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_fixed_rank_tensor_expression_retrieve_extents, value, test_types, fixture) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_static_rank_expression_retrieve_extents, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; - auto uplus1 = std::bind( std::plus{}, std::placeholders::_1, value_type(1) ); - auto uplus2 = std::bind( std::plus{}, value_type(2), std::placeholders::_2 ); - auto bplus = std::plus {}; - auto bminus = std::minus{}; + auto uplus1 = [](auto const& a){return a + value_t(1); }; + auto uplus2 = [](auto const& a){return value_t(2) + a; }; + auto bplus = std::plus {}; + auto bminus = std::minus{}; - for_each_tuple(extents, [&](auto const&, auto & e){ - using extents_type = std::decay_t; - using tensor_type = ublas::fixed_rank_tensor; - + for_each_in_tuple(extents, [&](auto const& /*unused*/, auto const& e){ - auto t = tensor_type(e); - auto v = value_type{}; - for(auto& tt: t){ tt = v; v+=value_type{1}; } - BOOST_CHECK( ublas::detail::retrieve_extents( t ) == e ); + static constexpr auto size = std::tuple_size_v>; + using tensor_t = ublas::tensor_static_rank; - // uexpr1 = t+1 - // uexpr2 = 2+t - auto uexpr1 = ublas::detail::make_unary_tensor_expression( t, uplus1 ); - auto uexpr2 = ublas::detail::make_unary_tensor_expression( t, uplus2 ); - BOOST_CHECK( ublas::detail::retrieve_extents( uexpr1 ) == e ); - BOOST_CHECK( ublas::detail::retrieve_extents( uexpr2 ) == e ); + auto t = tensor_t(e); + auto v = value_t{}; + for(auto& tt: t){ tt = v; v+=value_t{1}; } - // bexpr_uexpr = (t+1) + (2+t) - auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); - BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_uexpr ) == e ); + BOOST_CHECK( ublas::detail::retrieve_extents( t ) == e ); + // uexpr1 = t+1 + // uexpr2 = 2+t + auto uexpr1 = ublas::detail::make_unary_tensor_expression( t, uplus1 ); + auto uexpr2 = ublas::detail::make_unary_tensor_expression( t, uplus2 ); - // bexpr_bexpr_uexpr = 
((t+1) + (2+t)) - t - auto bexpr_bexpr_uexpr = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t, bminus ); + BOOST_CHECK( ublas::detail::retrieve_extents( uexpr1 ) == e ); + BOOST_CHECK( ublas::detail::retrieve_extents( uexpr2 ) == e ); - BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_bexpr_uexpr ) == e ); + // bexpr_uexpr = (t+1) + (2+t) + auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); - }); + BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_uexpr ) == e ); + + + // bexpr_bexpr_uexpr = ((t+1) + (2+t)) - t + auto bexpr_bexpr_uexpr = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t, bminus ); - for_each_tuple(extents, [&](auto I, auto& e1){ + BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_bexpr_uexpr ) == e ); - if ( I >= std::tuple_size_v - 1 ){ - return; - } - - using extents_type1 = std::decay_t; - using tensor_type1 = ublas::fixed_rank_tensor; + }); - for_each_tuple(extents, [&](auto J, auto& e2){ + for_each_in_tuple(extents, [&](auto I, auto const& e1){ - if( J != I + 1 ){ - return; - } - using extents_type2 = std::decay_t; - using tensor_type2 = ublas::fixed_rank_tensor; + if ( I >= std::tuple_size_v - 1 ){ + return; + } - auto v = value_type{}; + constexpr auto size1 = std::tuple_size_v>; + using tensor_type1 = ublas::tensor_static_rank; - tensor_type1 t1(e1); - for(auto& tt: t1){ tt = v; v+=value_type{1}; } + for_each_in_tuple(extents, [&,I](auto J, auto const& e2){ - tensor_type2 t2(e2); - for(auto& tt: t2){ tt = v; v+=value_type{2}; } + if( J != I + 1 ){ + return; + } - BOOST_CHECK( ublas::detail::retrieve_extents( t1 ) != ublas::detail::retrieve_extents( t2 ) ); + static constexpr auto size1 = std::tuple_size_v>; + static constexpr auto size2 = std::tuple_size_v>; + using tensor_type2 = ublas::tensor_static_rank; - // uexpr1 = t1+1 - // uexpr2 = 2+t2 - auto uexpr1 = ublas::detail::make_unary_tensor_expression( t1, uplus1 ); - auto uexpr2 = ublas::detail::make_unary_tensor_expression( t2, uplus2 ); + auto v = value_t{}; - BOOST_CHECK( ublas::detail::retrieve_extents( t1 ) == ublas::detail::retrieve_extents( uexpr1 ) ); - BOOST_CHECK( ublas::detail::retrieve_extents( t2 ) == ublas::detail::retrieve_extents( uexpr2 ) ); - BOOST_CHECK( ublas::detail::retrieve_extents( uexpr1 ) != ublas::detail::retrieve_extents( uexpr2 ) ); + tensor_type1 t1(e1); + for(auto& tt: t1){ tt = v; v+=value_t{1}; } - if constexpr( extents_type1::_size == extents_type2::_size ){ - // bexpr_uexpr = (t1+1) + (2+t2) - auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); + tensor_type2 t2(e2); + for(auto& tt: t2){ tt = v; v+=value_t{2}; } - BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_uexpr ) == ublas::detail::retrieve_extents(t1) ); + BOOST_CHECK( ublas::detail::retrieve_extents( t1 ) != ublas::detail::retrieve_extents( t2 ) ); + // uexpr1 = t1+1 + // uexpr2 = 2+t2 + auto uexpr1 = ublas::detail::make_unary_tensor_expression( t1, uplus1 ); + auto uexpr2 = ublas::detail::make_unary_tensor_expression( t2, uplus2 ); - // bexpr_bexpr_uexpr = ((t1+1) + (2+t2)) - t2 - auto bexpr_bexpr_uexpr1 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t2, bminus ); + BOOST_CHECK( ublas::detail::retrieve_extents( t1 ) == ublas::detail::retrieve_extents( uexpr1 ) ); + BOOST_CHECK( ublas::detail::retrieve_extents( t2 ) == ublas::detail::retrieve_extents( uexpr2 ) ); + BOOST_CHECK( ublas::detail::retrieve_extents( uexpr1 ) != ublas::detail::retrieve_extents( uexpr2 ) ); - BOOST_CHECK( 
ublas::detail::retrieve_extents( bexpr_bexpr_uexpr1 ) == ublas::detail::retrieve_extents(t2) ); + if constexpr( size1 == size2 ){ + // bexpr_uexpr = (t1+1) + (2+t2) + auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); + BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_uexpr ) == ublas::detail::retrieve_extents(t1) ); - // bexpr_bexpr_uexpr = t2 - ((t1+1) + (2+t2)) - auto bexpr_bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( t2, bexpr_uexpr, bminus ); - BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_bexpr_uexpr2 ) == ublas::detail::retrieve_extents(t2) ); - } + // bexpr_bexpr_uexpr = ((t1+1) + (2+t2)) - t2 + auto bexpr_bexpr_uexpr1 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t2, bminus ); + + BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_bexpr_uexpr1 ) == ublas::detail::retrieve_extents(t2) ); + + + // bexpr_bexpr_uexpr = t2 - ((t1+1) + (2+t2)) + auto bexpr_bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( t2, bexpr_uexpr, bminus ); + + BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_bexpr_uexpr2 ) == ublas::detail::retrieve_extents(t2) ); + } - }); }); + }); } @@ -164,122 +164,123 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_fixed_rank_tensor_expression_retrieve_ext -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_fixed_rank_tensor_expression_all_extents_equal, value, test_types, fixture) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_static_rank_expression_all_extents_equal, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; - auto uplus1 = std::bind( std::plus{}, std::placeholders::_1, value_type(1) ); - auto uplus2 = std::bind( std::plus{}, value_type(2), std::placeholders::_2 ); - auto bplus = std::plus {}; - auto bminus = std::minus{}; + auto uplus1 = [](auto const& a){return a + value_t(1); }; + auto uplus2 = [](auto const& a){return value_t(2) + a; }; + auto bplus = std::plus {}; + auto bminus = std::minus{}; - for_each_tuple(extents, [&](auto const&, auto& e){ - using extents_type = std::decay_t; - using tensor_type = ublas::fixed_rank_tensor; - + for_each_in_tuple(extents, [&](auto const& /*unused*/, auto& e){ + static constexpr auto size = std::tuple_size_v>; + using tensor_t = ublas::tensor_static_rank; - auto t = tensor_type(e); - auto v = value_type{}; - for(auto& tt: t){ tt = v; v+=value_type{1}; } + auto t = tensor_t(e); + auto v = value_t{}; + for(auto& tt: t){ tt = v; v+=value_t{1}; } - BOOST_CHECK( ublas::detail::all_extents_equal( t , e ) ); + BOOST_CHECK( ublas::detail::all_extents_equal( t , e ) ); - // uexpr1 = t+1 - // uexpr2 = 2+t - auto uexpr1 = ublas::detail::make_unary_tensor_expression( t, uplus1 ); - auto uexpr2 = ublas::detail::make_unary_tensor_expression( t, uplus2 ); - BOOST_CHECK( ublas::detail::all_extents_equal( uexpr1, e ) ); - BOOST_CHECK( ublas::detail::all_extents_equal( uexpr2, e ) ); + // uexpr1 = t+1 + // uexpr2 = 2+t + auto uexpr1 = ublas::detail::make_unary_tensor_expression( t, uplus1 ); + auto uexpr2 = ublas::detail::make_unary_tensor_expression( t, uplus2 ); - // bexpr_uexpr = (t+1) + (2+t) - auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); + BOOST_CHECK( ublas::detail::all_extents_equal( uexpr1, e ) ); + BOOST_CHECK( ublas::detail::all_extents_equal( uexpr2, e ) ); - 
BOOST_CHECK( ublas::detail::all_extents_equal( bexpr_uexpr, e ) ); + // bexpr_uexpr = (t+1) + (2+t) + auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); + BOOST_CHECK( ublas::detail::all_extents_equal( bexpr_uexpr, e ) ); - // bexpr_bexpr_uexpr = ((t+1) + (2+t)) - t - auto bexpr_bexpr_uexpr = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t, bminus ); - BOOST_CHECK( ublas::detail::all_extents_equal( bexpr_bexpr_uexpr , e ) ); + // bexpr_bexpr_uexpr = ((t+1) + (2+t)) - t + auto bexpr_bexpr_uexpr = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t, bminus ); - }); + BOOST_CHECK( ublas::detail::all_extents_equal( bexpr_bexpr_uexpr , e ) ); + + }); + + + for_each_in_tuple(extents, [&](auto I, auto& e1){ + if ( I >= std::tuple_size_v - 1){ + return; + } - for_each_tuple(extents, [&](auto I, auto& e1){ + static constexpr auto size1 = std::tuple_size_v>; + using tensor_type1 = ublas::tensor_static_rank; - if ( I >= std::tuple_size_v - 1){ - return; - } - - using extents_type1 = std::decay_t; - using tensor_type1 = ublas::fixed_rank_tensor; + for_each_in_tuple(extents, [&](auto J, auto& e2){ - for_each_tuple(extents, [&](auto J, auto& e2){ + if( J != I + 1 ){ + return; + } - if( J != I + 1 ){ - return; - } - using extents_type2 = std::decay_t; - using tensor_type2 = ublas::fixed_rank_tensor; + static constexpr auto size2 = std::tuple_size_v>; + using tensor_type2 = ublas::tensor_static_rank; - auto v = value_type{}; + auto v = value_t{}; - tensor_type1 t1(e1); - for(auto& tt: t1){ tt = v; v+=value_type{1}; } + tensor_type1 t1(e1); + for(auto& tt: t1){ tt = v; v+=value_t{1}; } - tensor_type2 t2(e2); - for(auto& tt: t2){ tt = v; v+=value_type{2}; } + tensor_type2 t2(e2); + for(auto& tt: t2){ tt = v; v+=value_t{2}; } - BOOST_CHECK( ublas::detail::all_extents_equal( t1, ublas::detail::retrieve_extents(t1) ) ); - BOOST_CHECK( ublas::detail::all_extents_equal( t2, ublas::detail::retrieve_extents(t2) ) ); + BOOST_CHECK( ublas::detail::all_extents_equal( t1, ublas::detail::retrieve_extents(t1) ) ); + BOOST_CHECK( ublas::detail::all_extents_equal( t2, ublas::detail::retrieve_extents(t2) ) ); - // uexpr1 = t1+1 - // uexpr2 = 2+t2 - auto uexpr1 = ublas::detail::make_unary_tensor_expression( t1, uplus1 ); - auto uexpr2 = ublas::detail::make_unary_tensor_expression( t2, uplus2 ); + // uexpr1 = t1+1 + // uexpr2 = 2+t2 + auto uexpr1 = ublas::detail::make_unary_tensor_expression( t1, uplus1 ); + auto uexpr2 = ublas::detail::make_unary_tensor_expression( t2, uplus2 ); - BOOST_CHECK( ublas::detail::all_extents_equal( uexpr1, ublas::detail::retrieve_extents(uexpr1) ) ); - BOOST_CHECK( ublas::detail::all_extents_equal( uexpr2, ublas::detail::retrieve_extents(uexpr2) ) ); + BOOST_CHECK( ublas::detail::all_extents_equal( uexpr1, ublas::detail::retrieve_extents(uexpr1) ) ); + BOOST_CHECK( ublas::detail::all_extents_equal( uexpr2, ublas::detail::retrieve_extents(uexpr2) ) ); - if constexpr( extents_type1::_size == extents_type2::_size ){ - // bexpr_uexpr = (t1+1) + (2+t2) - auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); + if constexpr( size1 == size2 ){ + // bexpr_uexpr = (t1+1) + (2+t2) + auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); - BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_uexpr, ublas::detail::retrieve_extents( bexpr_uexpr ) ) ); + BOOST_CHECK( ! 
ublas::detail::all_extents_equal( bexpr_uexpr, ublas::detail::retrieve_extents( bexpr_uexpr ) ) ); - // bexpr_bexpr_uexpr = ((t1+1) + (2+t2)) - t2 - auto bexpr_bexpr_uexpr1 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t2, bminus ); + // bexpr_bexpr_uexpr = ((t1+1) + (2+t2)) - t2 + auto bexpr_bexpr_uexpr1 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t2, bminus ); - BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_bexpr_uexpr1, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr1 ) ) ); + BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_bexpr_uexpr1, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr1 ) ) ); - // bexpr_bexpr_uexpr = t2 - ((t1+1) + (2+t2)) - auto bexpr_bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( t2, bexpr_uexpr, bminus ); + // bexpr_bexpr_uexpr = t2 - ((t1+1) + (2+t2)) + auto bexpr_bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( t2, bexpr_uexpr, bminus ); - BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_bexpr_uexpr2, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr2 ) ) ); + BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_bexpr_uexpr2, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr2 ) ) ); - // bexpr_uexpr2 = (t1+1) + t2 - auto bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( uexpr1, t2, bplus ); - BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_uexpr2, ublas::detail::retrieve_extents( bexpr_uexpr2 ) ) ); + // bexpr_uexpr2 = (t1+1) + t2 + auto bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( uexpr1, t2, bplus ); + BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_uexpr2, ublas::detail::retrieve_extents( bexpr_uexpr2 ) ) ); - // bexpr_uexpr2 = ((t1+1) + t2) + t1 - auto bexpr_bexpr_uexpr3 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr2, t1, bplus ); - BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_bexpr_uexpr3, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr3 ) ) ); + // bexpr_uexpr2 = ((t1+1) + t2) + t1 + auto bexpr_bexpr_uexpr3 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr2, t1, bplus ); + BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_bexpr_uexpr3, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr3 ) ) ); - // bexpr_uexpr2 = t1 + (((t1+1) + t2) + t1) - auto bexpr_bexpr_uexpr4 = ublas::detail::make_binary_tensor_expression( t1, bexpr_bexpr_uexpr3, bplus ); - BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_bexpr_uexpr4, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr4 ) ) ); - } + // bexpr_uexpr2 = t1 + (((t1+1) + t2) + t1) + auto bexpr_bexpr_uexpr4 = ublas::detail::make_binary_tensor_expression( t1, bexpr_bexpr_uexpr3, bplus ); + BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_bexpr_uexpr4, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr4 ) ) ); + } - }); }); + }); } diff --git a/test/tensor/test_fixed_rank_extents.cpp b/test/tensor/test_fixed_rank_extents.cpp index 5d55d6b01..ac873f55c 100644 --- a/test/tensor/test_fixed_rank_extents.cpp +++ b/test/tensor/test_fixed_rank_extents.cpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at @@ -11,544 +11,543 @@ // #include -#include // Needed for squeeze -#include +#include #include -BOOST_AUTO_TEST_SUITE ( test_fixed_rank_extents ) +BOOST_AUTO_TEST_SUITE ( test_extents_static_size ) //*boost::unit_test::label("extents") //*boost::unit_test::label("constructor") -BOOST_AUTO_TEST_CASE(test_fixed_rank_extents_ctor) +BOOST_AUTO_TEST_CASE(test_extents_static_size_ctor) { - namespace ub = boost::numeric::ublas; - - - auto e0 = ub::extents<0>{}; - BOOST_CHECK( e0.empty()); - BOOST_CHECK ( e0.size() == 0); - - auto e1 = ub::extents<2>{1,1}; - BOOST_CHECK(!e1.empty()); - BOOST_CHECK ( e1.size() == 2); - - auto e2 = ub::extents<2>{1,2}; - BOOST_CHECK(!e2.empty()); - BOOST_CHECK ( e2.size() == 2); - - auto e3 = ub::extents<2>{2,1}; - BOOST_CHECK (!e3.empty()); - BOOST_CHECK ( e3.size() == 2); - - auto e4 = ub::extents<2>{2,3}; - BOOST_CHECK(!e4.empty()); - BOOST_CHECK ( e4.size() == 2); - - auto e5 = ub::extents<3>{2,3,1}; - BOOST_CHECK (!e5.empty()); - BOOST_CHECK ( e5.size() == 3); - - auto e6 = ub::extents<3>{1,2,3}; // 6 - BOOST_CHECK(!e6.empty()); - BOOST_CHECK ( e6.size() == 3); - - auto e7 = ub::extents<3>{4,2,3}; // 7 - BOOST_CHECK(!e7.empty()); - BOOST_CHECK ( e7.size() == 3); - - BOOST_CHECK_THROW( ub::extents<2>({1,0}), std::length_error); - BOOST_CHECK_THROW( ub::extents<1>({0} ), std::length_error); - BOOST_CHECK_THROW( ub::extents<1>({3} ), std::length_error); - BOOST_CHECK_THROW( ub::extents<2>({0,1}), std::length_error); - BOOST_CHECK_THROW( ub::extents<2>({1,1,2}), std::out_of_range); + namespace ub = boost::numeric::ublas; + + +// auto e = ub::extents<0>{}; + auto e11 = ub::extents<2>{1,1}; + auto e12 = ub::extents<2>{1,2}; + auto e21 = ub::extents<2>{2,1}; + auto e23 = ub::extents<2>{2,3}; + auto e231 = ub::extents<3>{2,3,1}; + auto e123 = ub::extents<3>{1,2,3}; // 6 + auto e423 = ub::extents<3>{4,2,3}; // 7 + + + BOOST_CHECK (!ub::empty(e11)); + BOOST_CHECK (!ub::empty(e12)); + BOOST_CHECK (!ub::empty(e21)); + BOOST_CHECK (!ub::empty(e23)); + BOOST_CHECK (!ub::empty(e231)); + BOOST_CHECK (!ub::empty(e123)); + BOOST_CHECK (!ub::empty(e423)); + + BOOST_CHECK ( ub::size (e11) == 2); + BOOST_CHECK ( ub::size (e12) == 2); + BOOST_CHECK ( ub::size (e21) == 2); + BOOST_CHECK ( ub::size (e23) == 2); + BOOST_CHECK ( ub::size(e231) == 3); + BOOST_CHECK ( ub::size(e123) == 3); + BOOST_CHECK ( ub::size(e423) == 3); + + + BOOST_CHECK_THROW( ub::extents<2>({1,0}), std::invalid_argument); + BOOST_CHECK_THROW( ub::extents<1>({0} ), std::invalid_argument); + BOOST_CHECK_THROW( ub::extents<2>({0,1}), std::invalid_argument); + BOOST_CHECK_THROW( ub::extents<2>({1,1,2}), std::length_error); } struct fixture { - template - using extents = boost::numeric::ublas::extents; - - extents<0> de0{}; // 0 - - extents<2> de1{1,1}; // 1 - extents<2> de2{1,2}; // 2 - extents<2> de3{2,1}; // 3 - - extents<2> de4{2,3}; // 4 - extents<3> de5{2,3,1}; // 5 - extents<3> de6{1,2,3}; // 6 - extents<4> de7{1,1,2,3}; // 7 - extents<5> de8{1,2,3,1,1}; // 8 - - extents<3> de9{4,2,3}; // 9 - extents<4> de10{4,2,1,3}; // 10 - extents<5> de11{4,2,1,3,1}; // 11 - extents<6> de12{1,4,2,1,3,1};// 12 - - extents<3> de13{1,4,1}; // 13 - extents<4> de14{1,1,1,1}; // 14 - extents<5> de15{1,4,1,1,1}; // 15 - extents<6> de16{1,1,2,1,1,1};// 16 - extents<6> de17{1,1,2,3,1,1};// 17 + template + using extents = boost::numeric::ublas::extents; + +// extents<0> de {}; + + extents<2> de11 {1,1}; + extents<2> de12 {1,2}; + extents<2> de21 {2,1}; + + extents<2> de23 {2,3}; + 
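The constructor checks above fix the error contract of the static-rank extents: a zero dimension is rejected with std::invalid_argument, and an initializer list longer than the rank is rejected with std::length_error. A minimal sketch of that contract, not taken from the patch; the umbrella header, the helper name and the concrete dimensions are illustrative.

#include <boost/numeric/ublas/tensor.hpp>

namespace ublas = boost::numeric::ublas;

void sketch_extents_ctor()
{
  auto e = ublas::extents<3>{4,2,3};   // rank fixed at compile time, dimensions at run time

  // ublas::size(e) == 3, ublas::product(e) == 24, e[0] == 4
  //
  // ublas::extents<2>{0,1};    // would throw std::invalid_argument (zero dimension)
  // ublas::extents<2>{1,1,2};  // would throw std::length_error (three values, rank two)
  (void)e;
}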
extents<3> de231 {2,3,1}; + extents<3> de123 {1,2,3}; + extents<4> de1123 {1,1,2,3}; + extents<5> de12311 {1,2,3,1,1}; + + extents<3> de423 {4,2,3}; + extents<4> de4213 {4,2,1,3}; + extents<5> de42131 {4,2,1,3,1}; + extents<6> de142131 {1,4,2,1,3,1}; + + extents<3> de141 {1,4,1}; + extents<4> de1111 {1,1,1,1}; + extents<5> de14111 {1,4,1,1,1}; + extents<6> de112111 {1,1,2,1,1,1}; + extents<6> de112311 {1,1,2,3,1,1}; }; -BOOST_FIXTURE_TEST_CASE(test_fixed_rank_extents_access, fixture, *boost::unit_test::label("basic_fixed_rank_extents") *boost::unit_test::label("access")) -{ - using namespace boost::numeric; - - BOOST_CHECK_EQUAL(de0.size(), 0); - BOOST_CHECK (de0.empty() ); - - BOOST_REQUIRE_EQUAL(de1.size(), 2); - BOOST_REQUIRE_EQUAL(de2.size(), 2); - BOOST_REQUIRE_EQUAL(de3.size(), 2); - BOOST_REQUIRE_EQUAL(de4.size(), 2); - BOOST_REQUIRE_EQUAL(de5.size(), 3); - BOOST_REQUIRE_EQUAL(de6.size(), 3); - BOOST_REQUIRE_EQUAL(de7.size(), 4); - BOOST_REQUIRE_EQUAL(de8.size(), 5); - BOOST_REQUIRE_EQUAL(de9.size(), 3); - BOOST_REQUIRE_EQUAL(de10.size(), 4); - BOOST_REQUIRE_EQUAL(de11.size(), 5); - BOOST_REQUIRE_EQUAL(de12.size(), 6); - BOOST_REQUIRE_EQUAL(de13.size(), 3); - BOOST_REQUIRE_EQUAL(de14.size(), 4); - BOOST_REQUIRE_EQUAL(de15.size(), 5); - BOOST_REQUIRE_EQUAL(de16.size(), 6); - BOOST_REQUIRE_EQUAL(de17.size(), 6); - - - BOOST_CHECK_EQUAL(de1[0],1); - BOOST_CHECK_EQUAL(de1[1],1); - - BOOST_CHECK_EQUAL(de2[0],1); - BOOST_CHECK_EQUAL(de2[1],2); - - BOOST_CHECK_EQUAL(de3[0],2); - BOOST_CHECK_EQUAL(de3[1],1); - - BOOST_CHECK_EQUAL(de4[0],2); - BOOST_CHECK_EQUAL(de4[1],3); - - BOOST_CHECK_EQUAL(de5[0],2); - BOOST_CHECK_EQUAL(de5[1],3); - BOOST_CHECK_EQUAL(de5[2],1); - - BOOST_CHECK_EQUAL(de6[0],1); - BOOST_CHECK_EQUAL(de6[1],2); - BOOST_CHECK_EQUAL(de6[2],3); - - BOOST_CHECK_EQUAL(de7[0],1); - BOOST_CHECK_EQUAL(de7[1],1); - BOOST_CHECK_EQUAL(de7[2],2); - BOOST_CHECK_EQUAL(de7[3],3); - - BOOST_CHECK_EQUAL(de8[0],1); - BOOST_CHECK_EQUAL(de8[1],2); - BOOST_CHECK_EQUAL(de8[2],3); - BOOST_CHECK_EQUAL(de8[3],1); - BOOST_CHECK_EQUAL(de8[4],1); - - BOOST_CHECK_EQUAL(de9[0],4); - BOOST_CHECK_EQUAL(de9[1],2); - BOOST_CHECK_EQUAL(de9[2],3); - - BOOST_CHECK_EQUAL(de10[0],4); - BOOST_CHECK_EQUAL(de10[1],2); - BOOST_CHECK_EQUAL(de10[2],1); - BOOST_CHECK_EQUAL(de10[3],3); - - BOOST_CHECK_EQUAL(de11[0],4); - BOOST_CHECK_EQUAL(de11[1],2); - BOOST_CHECK_EQUAL(de11[2],1); - BOOST_CHECK_EQUAL(de11[3],3); - BOOST_CHECK_EQUAL(de11[4],1); - - BOOST_CHECK_EQUAL(de12[0],1); - BOOST_CHECK_EQUAL(de12[1],4); - BOOST_CHECK_EQUAL(de12[2],2); - BOOST_CHECK_EQUAL(de12[3],1); - BOOST_CHECK_EQUAL(de12[4],3); - BOOST_CHECK_EQUAL(de12[5],1); - - BOOST_CHECK_EQUAL(de13[0],1); - BOOST_CHECK_EQUAL(de13[1],4); - BOOST_CHECK_EQUAL(de13[2],1); - - BOOST_CHECK_EQUAL(de14[0],1); - BOOST_CHECK_EQUAL(de14[1],1); - BOOST_CHECK_EQUAL(de14[2],1); - BOOST_CHECK_EQUAL(de14[3],1); - - BOOST_CHECK_EQUAL(de15[0],1); - BOOST_CHECK_EQUAL(de15[1],4); - BOOST_CHECK_EQUAL(de15[2],1); - BOOST_CHECK_EQUAL(de15[3],1); - BOOST_CHECK_EQUAL(de15[4],1); - - BOOST_CHECK_EQUAL(de16[0],1); - BOOST_CHECK_EQUAL(de16[1],1); - BOOST_CHECK_EQUAL(de16[2],2); - BOOST_CHECK_EQUAL(de16[3],1); - BOOST_CHECK_EQUAL(de16[4],1); - BOOST_CHECK_EQUAL(de16[5],1); - - BOOST_CHECK_EQUAL(de17[0],1); - BOOST_CHECK_EQUAL(de17[1],1); - BOOST_CHECK_EQUAL(de17[2],2); - BOOST_CHECK_EQUAL(de17[3],3); - BOOST_CHECK_EQUAL(de17[4],1); - BOOST_CHECK_EQUAL(de17[5],1); -} - -BOOST_FIXTURE_TEST_CASE(test_fixed_rank_extents_copy_ctor, fixture, 
*boost::unit_test::label("basic_fixed_rank_extents") *boost::unit_test::label("copy_ctor")) +BOOST_FIXTURE_TEST_CASE(test_extents_static_size_access, fixture, *boost::unit_test::label("basic_fixed_rank_extents") *boost::unit_test::label("access")) { - auto e0 = de0; // {} - auto e1 = de1; // {1,1} - auto e2 = de2; // {1,2} - auto e3 = de3; // {2,1} - auto e4 = de4; // {2,3} - auto e5 = de5; // {2,3,1} - auto e6 = de6; // {1,2,3} - auto e7 = de7; // {1,1,2,3} - auto e8 = de8; // {1,2,3,1,1} - auto e9 = de9; // {4,2,3} - auto e10 = de10; // {4,2,1,3} - auto e11 = de11; // {4,2,1,3,1} - auto e12 = de12; // {1,4,2,1,3,1} - auto e13 = de13; // {1,4,1} - auto e14 = de14; // {1,1,1,1} - auto e15 = de15; // {1,4,1,1,1} - auto e16 = de16; // {1,1,2,1,1,1} - auto e17 = de17; // {1,1,2,3,1,1} - - BOOST_CHECK_EQUAL (e0.size(), 0); - BOOST_CHECK (e0.empty() ); - - BOOST_REQUIRE_EQUAL(e1 .size(), 2); - BOOST_REQUIRE_EQUAL(e2 .size(), 2); - BOOST_REQUIRE_EQUAL(e3 .size(), 2); - BOOST_REQUIRE_EQUAL(e4 .size(), 2); - BOOST_REQUIRE_EQUAL(e5 .size(), 3); - BOOST_REQUIRE_EQUAL(e6 .size(), 3); - BOOST_REQUIRE_EQUAL(e7 .size(), 4); - BOOST_REQUIRE_EQUAL(e8 .size(), 5); - BOOST_REQUIRE_EQUAL(e9 .size(), 3); - BOOST_REQUIRE_EQUAL(e10.size(), 4); - BOOST_REQUIRE_EQUAL(e11.size(), 5); - BOOST_REQUIRE_EQUAL(e12.size(), 6); - BOOST_REQUIRE_EQUAL(e13.size(), 3); - BOOST_REQUIRE_EQUAL(e14.size(), 4); - BOOST_REQUIRE_EQUAL(e15.size(), 5); - BOOST_REQUIRE_EQUAL(e16.size(), 6); - BOOST_REQUIRE_EQUAL(e17.size(), 6); - - - BOOST_CHECK_EQUAL(e1[0],1); - BOOST_CHECK_EQUAL(e1[1],1); - - BOOST_CHECK_EQUAL(e2[0],1); - BOOST_CHECK_EQUAL(e2[1],2); - - BOOST_CHECK_EQUAL(e3[0],2); - BOOST_CHECK_EQUAL(e3[1],1); - - BOOST_CHECK_EQUAL(e4[0],2); - BOOST_CHECK_EQUAL(e4[1],3); - - BOOST_CHECK_EQUAL(e5[0],2); - BOOST_CHECK_EQUAL(e5[1],3); - BOOST_CHECK_EQUAL(e5[2],1); - - BOOST_CHECK_EQUAL(e6[0],1); - BOOST_CHECK_EQUAL(e6[1],2); - BOOST_CHECK_EQUAL(e6[2],3); - - BOOST_CHECK_EQUAL(e7[0],1); - BOOST_CHECK_EQUAL(e7[1],1); - BOOST_CHECK_EQUAL(e7[2],2); - BOOST_CHECK_EQUAL(e7[3],3); - - BOOST_CHECK_EQUAL(e8[0],1); - BOOST_CHECK_EQUAL(e8[1],2); - BOOST_CHECK_EQUAL(e8[2],3); - BOOST_CHECK_EQUAL(e8[3],1); - BOOST_CHECK_EQUAL(e8[4],1); - - BOOST_CHECK_EQUAL(e9[0],4); - BOOST_CHECK_EQUAL(e9[1],2); - BOOST_CHECK_EQUAL(e9[2],3); - - BOOST_CHECK_EQUAL(e10[0],4); - BOOST_CHECK_EQUAL(e10[1],2); - BOOST_CHECK_EQUAL(e10[2],1); - BOOST_CHECK_EQUAL(e10[3],3); - - BOOST_CHECK_EQUAL(e11[0],4); - BOOST_CHECK_EQUAL(e11[1],2); - BOOST_CHECK_EQUAL(e11[2],1); - BOOST_CHECK_EQUAL(e11[3],3); - BOOST_CHECK_EQUAL(e11[4],1); - - BOOST_CHECK_EQUAL(e12[0],1); - BOOST_CHECK_EQUAL(e12[1],4); - BOOST_CHECK_EQUAL(e12[2],2); - BOOST_CHECK_EQUAL(e12[3],1); - BOOST_CHECK_EQUAL(e12[4],3); - BOOST_CHECK_EQUAL(e12[5],1); - - BOOST_CHECK_EQUAL(e13[0],1); - BOOST_CHECK_EQUAL(e13[1],4); - BOOST_CHECK_EQUAL(e13[2],1); - - BOOST_CHECK_EQUAL(e14[0],1); - BOOST_CHECK_EQUAL(e14[1],1); - BOOST_CHECK_EQUAL(e14[2],1); - BOOST_CHECK_EQUAL(e14[3],1); - - BOOST_CHECK_EQUAL(e15[0],1); - BOOST_CHECK_EQUAL(e15[1],4); - BOOST_CHECK_EQUAL(e15[2],1); - BOOST_CHECK_EQUAL(e15[3],1); - BOOST_CHECK_EQUAL(e15[4],1); - - BOOST_CHECK_EQUAL(e16[0],1); - BOOST_CHECK_EQUAL(e16[1],1); - BOOST_CHECK_EQUAL(e16[2],2); - BOOST_CHECK_EQUAL(e16[3],1); - BOOST_CHECK_EQUAL(e16[4],1); - BOOST_CHECK_EQUAL(e16[5],1); - - BOOST_CHECK_EQUAL(e17[0],1); - BOOST_CHECK_EQUAL(e17[1],1); - BOOST_CHECK_EQUAL(e17[2],2); - BOOST_CHECK_EQUAL(e17[3],3); - BOOST_CHECK_EQUAL(e17[4],1); - BOOST_CHECK_EQUAL(e17[5],1); - + namespace ublas = 
boost::numeric::ublas; + +// BOOST_REQUIRE_EQUAL(ublas::size(de), 0); +// BOOST_CHECK (ublas::empty(de) ); + + BOOST_REQUIRE_EQUAL(ublas::size(de11) , 2); + BOOST_REQUIRE_EQUAL(ublas::size(de12) , 2); + BOOST_REQUIRE_EQUAL(ublas::size(de21) , 2); + BOOST_REQUIRE_EQUAL(ublas::size(de23) , 2); + BOOST_REQUIRE_EQUAL(ublas::size(de231) , 3); + BOOST_REQUIRE_EQUAL(ublas::size(de123) , 3); + BOOST_REQUIRE_EQUAL(ublas::size(de1123) , 4); + BOOST_REQUIRE_EQUAL(ublas::size(de12311) , 5); + BOOST_REQUIRE_EQUAL(ublas::size(de423) , 3); + BOOST_REQUIRE_EQUAL(ublas::size(de4213) , 4); + BOOST_REQUIRE_EQUAL(ublas::size(de42131) , 5); + BOOST_REQUIRE_EQUAL(ublas::size(de142131), 6); + BOOST_REQUIRE_EQUAL(ublas::size(de141) , 3); + BOOST_REQUIRE_EQUAL(ublas::size(de1111) , 4); + BOOST_REQUIRE_EQUAL(ublas::size(de14111) , 5); + BOOST_REQUIRE_EQUAL(ublas::size(de112111), 6); + BOOST_REQUIRE_EQUAL(ublas::size(de112311), 6); + + + BOOST_CHECK_EQUAL(de11[0],1); + BOOST_CHECK_EQUAL(de11[1],1); + + BOOST_CHECK_EQUAL(de12[0],1); + BOOST_CHECK_EQUAL(de12[1],2); + + BOOST_CHECK_EQUAL(de21[0],2); + BOOST_CHECK_EQUAL(de21[1],1); + + BOOST_CHECK_EQUAL(de23[0],2); + BOOST_CHECK_EQUAL(de23[1],3); + + BOOST_CHECK_EQUAL(de231[0],2); + BOOST_CHECK_EQUAL(de231[1],3); + BOOST_CHECK_EQUAL(de231[2],1); + + BOOST_CHECK_EQUAL(de123[0],1); + BOOST_CHECK_EQUAL(de123[1],2); + BOOST_CHECK_EQUAL(de123[2],3); + + BOOST_CHECK_EQUAL(de1123[0],1); + BOOST_CHECK_EQUAL(de1123[1],1); + BOOST_CHECK_EQUAL(de1123[2],2); + BOOST_CHECK_EQUAL(de1123[3],3); + + BOOST_CHECK_EQUAL(de12311[0],1); + BOOST_CHECK_EQUAL(de12311[1],2); + BOOST_CHECK_EQUAL(de12311[2],3); + BOOST_CHECK_EQUAL(de12311[3],1); + BOOST_CHECK_EQUAL(de12311[4],1); + + BOOST_CHECK_EQUAL(de423[0],4); + BOOST_CHECK_EQUAL(de423[1],2); + BOOST_CHECK_EQUAL(de423[2],3); + + BOOST_CHECK_EQUAL(de4213[0],4); + BOOST_CHECK_EQUAL(de4213[1],2); + BOOST_CHECK_EQUAL(de4213[2],1); + BOOST_CHECK_EQUAL(de4213[3],3); + + BOOST_CHECK_EQUAL(de42131[0],4); + BOOST_CHECK_EQUAL(de42131[1],2); + BOOST_CHECK_EQUAL(de42131[2],1); + BOOST_CHECK_EQUAL(de42131[3],3); + BOOST_CHECK_EQUAL(de42131[4],1); + + BOOST_CHECK_EQUAL(de142131[0],1); + BOOST_CHECK_EQUAL(de142131[1],4); + BOOST_CHECK_EQUAL(de142131[2],2); + BOOST_CHECK_EQUAL(de142131[3],1); + BOOST_CHECK_EQUAL(de142131[4],3); + BOOST_CHECK_EQUAL(de142131[5],1); + + BOOST_CHECK_EQUAL(de141[0],1); + BOOST_CHECK_EQUAL(de141[1],4); + BOOST_CHECK_EQUAL(de141[2],1); + + BOOST_CHECK_EQUAL(de1111[0],1); + BOOST_CHECK_EQUAL(de1111[1],1); + BOOST_CHECK_EQUAL(de1111[2],1); + BOOST_CHECK_EQUAL(de1111[3],1); + + BOOST_CHECK_EQUAL(de14111[0],1); + BOOST_CHECK_EQUAL(de14111[1],4); + BOOST_CHECK_EQUAL(de14111[2],1); + BOOST_CHECK_EQUAL(de14111[3],1); + BOOST_CHECK_EQUAL(de14111[4],1); + + BOOST_CHECK_EQUAL(de112111[0],1); + BOOST_CHECK_EQUAL(de112111[1],1); + BOOST_CHECK_EQUAL(de112111[2],2); + BOOST_CHECK_EQUAL(de112111[3],1); + BOOST_CHECK_EQUAL(de112111[4],1); + BOOST_CHECK_EQUAL(de112111[5],1); + + BOOST_CHECK_EQUAL(de112311[0],1); + BOOST_CHECK_EQUAL(de112311[1],1); + BOOST_CHECK_EQUAL(de112311[2],2); + BOOST_CHECK_EQUAL(de112311[3],3); + BOOST_CHECK_EQUAL(de112311[4],1); + BOOST_CHECK_EQUAL(de112311[5],1); } -BOOST_FIXTURE_TEST_CASE(test_fixed_rank_extents_is, fixture, *boost::unit_test::label("basic_fixed_rank_extents") *boost::unit_test::label("query")) +BOOST_FIXTURE_TEST_CASE(test_extents_static_size_copy_ctor, fixture, *boost::unit_test::label("basic_fixed_rank_extents") *boost::unit_test::label("copy_ctor")) { + namespace ublas = boost::numeric::ublas; + +// 
auto e = de; + auto e1 = de11; + auto e12 = de12; + auto e21 = de21; + auto e23 = de23; + auto e231 = de231; + auto e123 = de123; + auto e1123 = de1123; + auto e12311 = de12311; + auto e423 = de423; + auto e4213 = de4213; + auto e42131 = de42131; + auto e142131 = de142131; + auto e141 = de141; + auto e1111 = de1111; + auto e14111 = de14111; + auto e112111 = de112111; + auto e112311 = de112311; + + +// BOOST_CHECK (ublas::empty(e) ); + +// BOOST_REQUIRE_EQUAL(ublas::size(e) , 0); + BOOST_REQUIRE_EQUAL(ublas::size(e1) , 2); + BOOST_REQUIRE_EQUAL(ublas::size(e12) , 2); + BOOST_REQUIRE_EQUAL(ublas::size(e21) , 2); + BOOST_REQUIRE_EQUAL(ublas::size(e23) , 2); + BOOST_REQUIRE_EQUAL(ublas::size(e231), 3); + BOOST_REQUIRE_EQUAL(ublas::size(e123), 3); + BOOST_REQUIRE_EQUAL(ublas::size(e1123), 4); + BOOST_REQUIRE_EQUAL(ublas::size(e12311), 5); + BOOST_REQUIRE_EQUAL(ublas::size(e423), 3); + BOOST_REQUIRE_EQUAL(ublas::size(e4213), 4); + BOOST_REQUIRE_EQUAL(ublas::size(e42131), 5); + BOOST_REQUIRE_EQUAL(ublas::size(e142131), 6); + BOOST_REQUIRE_EQUAL(ublas::size(e141), 3); + BOOST_REQUIRE_EQUAL(ublas::size(e1111), 4); + BOOST_REQUIRE_EQUAL(ublas::size(e14111), 5); + BOOST_REQUIRE_EQUAL(ublas::size(e112111), 6); + BOOST_REQUIRE_EQUAL(ublas::size(e112311), 6); + + + BOOST_CHECK_EQUAL(e1[0],1); + BOOST_CHECK_EQUAL(e1[1],1); + + BOOST_CHECK_EQUAL(e12[0],1); + BOOST_CHECK_EQUAL(e12[1],2); + + BOOST_CHECK_EQUAL(e21[0],2); + BOOST_CHECK_EQUAL(e21[1],1); + + BOOST_CHECK_EQUAL(e23[0],2); + BOOST_CHECK_EQUAL(e23[1],3); + + BOOST_CHECK_EQUAL(e231[0],2); + BOOST_CHECK_EQUAL(e231[1],3); + BOOST_CHECK_EQUAL(e231[2],1); + + BOOST_CHECK_EQUAL(e123[0],1); + BOOST_CHECK_EQUAL(e123[1],2); + BOOST_CHECK_EQUAL(e123[2],3); + + BOOST_CHECK_EQUAL(e1123[0],1); + BOOST_CHECK_EQUAL(e1123[1],1); + BOOST_CHECK_EQUAL(e1123[2],2); + BOOST_CHECK_EQUAL(e1123[3],3); + + BOOST_CHECK_EQUAL(e12311[0],1); + BOOST_CHECK_EQUAL(e12311[1],2); + BOOST_CHECK_EQUAL(e12311[2],3); + BOOST_CHECK_EQUAL(e12311[3],1); + BOOST_CHECK_EQUAL(e12311[4],1); + + BOOST_CHECK_EQUAL(e423[0],4); + BOOST_CHECK_EQUAL(e423[1],2); + BOOST_CHECK_EQUAL(e423[2],3); + + BOOST_CHECK_EQUAL(e4213[0],4); + BOOST_CHECK_EQUAL(e4213[1],2); + BOOST_CHECK_EQUAL(e4213[2],1); + BOOST_CHECK_EQUAL(e4213[3],3); + + BOOST_CHECK_EQUAL(e42131[0],4); + BOOST_CHECK_EQUAL(e42131[1],2); + BOOST_CHECK_EQUAL(e42131[2],1); + BOOST_CHECK_EQUAL(e42131[3],3); + BOOST_CHECK_EQUAL(e42131[4],1); + + BOOST_CHECK_EQUAL(e142131[0],1); + BOOST_CHECK_EQUAL(e142131[1],4); + BOOST_CHECK_EQUAL(e142131[2],2); + BOOST_CHECK_EQUAL(e142131[3],1); + BOOST_CHECK_EQUAL(e142131[4],3); + BOOST_CHECK_EQUAL(e142131[5],1); + + BOOST_CHECK_EQUAL(e141[0],1); + BOOST_CHECK_EQUAL(e141[1],4); + BOOST_CHECK_EQUAL(e141[2],1); + + BOOST_CHECK_EQUAL(e1111[0],1); + BOOST_CHECK_EQUAL(e1111[1],1); + BOOST_CHECK_EQUAL(e1111[2],1); + BOOST_CHECK_EQUAL(e1111[3],1); + + BOOST_CHECK_EQUAL(e14111[0],1); + BOOST_CHECK_EQUAL(e14111[1],4); + BOOST_CHECK_EQUAL(e14111[2],1); + BOOST_CHECK_EQUAL(e14111[3],1); + BOOST_CHECK_EQUAL(e14111[4],1); + + BOOST_CHECK_EQUAL(e112111[0],1); + BOOST_CHECK_EQUAL(e112111[1],1); + BOOST_CHECK_EQUAL(e112111[2],2); + BOOST_CHECK_EQUAL(e112111[3],1); + BOOST_CHECK_EQUAL(e112111[4],1); + BOOST_CHECK_EQUAL(e112111[5],1); + + BOOST_CHECK_EQUAL(e112311[0],1); + BOOST_CHECK_EQUAL(e112311[1],1); + BOOST_CHECK_EQUAL(e112311[2],2); + BOOST_CHECK_EQUAL(e112311[3],3); + BOOST_CHECK_EQUAL(e112311[4],1); + BOOST_CHECK_EQUAL(e112311[5],1); - auto e0 = de0; // {} - auto e1 = de1; // {1,1} - auto e2 = de2; // {1,2} - auto e3 = 
de3; // {2,1} - auto e4 = de4; // {2,3} - auto e5 = de5; // {2,3,1} - auto e6 = de6; // {1,2,3} - auto e7 = de7; // {1,1,2,3} - auto e8 = de8; // {1,2,3,1,1} - auto e9 = de9; // {4,2,3} - auto e10 = de10; // {4,2,1,3} - auto e11 = de11; // {4,2,1,3,1} - auto e12 = de12; // {1,4,2,1,3,1} - auto e13 = de13; // {1,4,1} - auto e14 = de14; // {1,1,1,1} - auto e15 = de15; // {1,4,1,1,1} - auto e16 = de16; // {1,1,2,1,1,1} - auto e17 = de17; // {1,1,2,3,1,1} - - BOOST_CHECK( e0.empty ( )); - BOOST_CHECK( ! is_scalar(e0)); - BOOST_CHECK( ! is_vector(e0)); - BOOST_CHECK( ! is_matrix(e0)); - BOOST_CHECK( ! is_tensor(e0)); - - BOOST_CHECK( ! e1.empty ( ) ); - BOOST_CHECK( is_scalar(e1) ); - BOOST_CHECK( ! is_vector(e1) ); - BOOST_CHECK( ! is_matrix(e1) ); - BOOST_CHECK( ! is_tensor(e1) ); - - BOOST_CHECK( ! e2.empty ( ) ); - BOOST_CHECK( ! is_scalar(e2) ); - BOOST_CHECK( is_vector(e2) ); - BOOST_CHECK( ! is_matrix(e2) ); - BOOST_CHECK( ! is_tensor(e2) ); - - BOOST_CHECK( ! e3.empty ( ) ); - BOOST_CHECK( ! is_scalar(e3) ); - BOOST_CHECK( is_vector(e3) ); - BOOST_CHECK( ! is_matrix(e3) ); - BOOST_CHECK( ! is_tensor(e3) ); - - BOOST_CHECK( ! e4.empty ( ) ); - BOOST_CHECK( ! is_scalar(e4) ); - BOOST_CHECK( ! is_vector(e4) ); - BOOST_CHECK( is_matrix(e4) ); - BOOST_CHECK( ! is_tensor(e4) ); - - BOOST_CHECK( ! e5.empty ( ) ); - BOOST_CHECK( ! is_scalar(e5) ); - BOOST_CHECK( ! is_vector(e5) ); - BOOST_CHECK( is_matrix(e5) ); - BOOST_CHECK( ! is_tensor(e5) ); - - BOOST_CHECK( ! e6.empty ( ) ); - BOOST_CHECK( ! is_scalar(e6) ); - BOOST_CHECK( ! is_vector(e6) ); - BOOST_CHECK( ! is_matrix(e6) ); - BOOST_CHECK( is_tensor(e6) ); - - BOOST_CHECK( ! e7.empty ( ) ); - BOOST_CHECK( ! is_scalar(e7) ); - BOOST_CHECK( ! is_vector(e7) ); - BOOST_CHECK( ! is_matrix(e7) ); - BOOST_CHECK( is_tensor(e7) ); - - BOOST_CHECK( ! e8.empty ( ) ); - BOOST_CHECK( ! is_scalar(e8) ); - BOOST_CHECK( ! is_vector(e8) ); - BOOST_CHECK( ! is_matrix(e8) ); - BOOST_CHECK( is_tensor(e8) ); - - BOOST_CHECK( ! e9.empty ( ) ); - BOOST_CHECK( ! is_scalar(e9) ); - BOOST_CHECK( ! is_vector(e9) ); - BOOST_CHECK( ! is_matrix(e9) ); - BOOST_CHECK( is_tensor(e9) ); - - BOOST_CHECK( ! e10.empty( ) ); - BOOST_CHECK( ! is_scalar(e10) ); - BOOST_CHECK( ! is_vector(e10) ); - BOOST_CHECK( ! is_matrix(e10) ); - BOOST_CHECK( is_tensor(e10) ); - - BOOST_CHECK( ! e11.empty( ) ); - BOOST_CHECK( ! is_scalar(e11) ); - BOOST_CHECK( ! is_vector(e11) ); - BOOST_CHECK( ! is_matrix(e11) ); - BOOST_CHECK( is_tensor(e11) ); - - BOOST_CHECK( ! e12.empty( ) ); - BOOST_CHECK( ! is_scalar(e12) ); - BOOST_CHECK( ! is_vector(e12) ); - BOOST_CHECK( ! is_matrix(e12) ); - BOOST_CHECK( is_tensor(e12) ); - - BOOST_CHECK( ! e13.empty( ) ); - BOOST_CHECK( ! is_scalar(e13) ); - BOOST_CHECK( is_vector(e13) ); - BOOST_CHECK( ! is_matrix(e13) ); - BOOST_CHECK( ! is_tensor(e13) ); - - BOOST_CHECK( ! e14.empty( ) ); - BOOST_CHECK( is_scalar(e14) ); - BOOST_CHECK( ! is_vector(e14) ); - BOOST_CHECK( ! is_matrix(e14) ); - BOOST_CHECK( ! is_tensor(e14) ); - - BOOST_CHECK( ! e15.empty( ) ); - BOOST_CHECK( ! is_scalar(e15) ); - BOOST_CHECK( is_vector(e15) ); - BOOST_CHECK( ! is_matrix(e15) ); - BOOST_CHECK( ! is_tensor(e15) ); - - BOOST_CHECK( ! e16.empty( ) ); - BOOST_CHECK( ! is_scalar(e16) ); - BOOST_CHECK( ! is_vector(e16) ); - BOOST_CHECK( ! is_matrix(e16) ); - BOOST_CHECK( is_tensor(e16) ); - - BOOST_CHECK( ! e17.empty( ) ); - BOOST_CHECK( ! is_scalar(e17) ); - BOOST_CHECK( ! is_vector(e17) ); - BOOST_CHECK( ! 
is_matrix(e17) ); - BOOST_CHECK( is_tensor(e17) ); } -BOOST_FIXTURE_TEST_CASE(test_fixed_rank_extents_squeeze, fixture, *boost::unit_test::label("basic_fixed_rank_extents") *boost::unit_test::label("squeeze")) +BOOST_FIXTURE_TEST_CASE(test_extents_static_size_is, fixture, *boost::unit_test::label("basic_fixed_rank_extents") *boost::unit_test::label("query")) { - auto e1 = squeeze(de1); // {1,1} - auto e2 = squeeze(de2); // {1,2} - auto e3 = squeeze(de3); // {2,1} - - auto e4 = squeeze(de4); // {2,3} - auto e5 = squeeze(de5); // {2,3} - auto e6 = squeeze(de6); // {2,3} - auto e7 = squeeze(de7); // {2,3} - auto e8 = squeeze(de8); // {2,3} - - auto e9 = squeeze(de9); // {4,2,3} - auto e10 = squeeze(de10); // {4,2,3} - auto e11 = squeeze(de11); // {4,2,3} - auto e12 = squeeze(de12); // {4,2,3} - - auto e13 = squeeze(de13); // {1,4} - auto e14 = squeeze(de14); // {1,1} - auto e15 = squeeze(de15); // {1,4} - auto e16 = squeeze(de16); // {2,1} - auto e17 = squeeze(de17); // {2,3} - - BOOST_CHECK( (e1 == extents<2>{1,1}) ); - BOOST_CHECK( (e2 == extents<2>{1,2}) ); - BOOST_CHECK( (e3 == extents<2>{2,1}) ); - - BOOST_CHECK( (e4 == extents<2>{2,3}) ); - BOOST_CHECK( (e5 == extents<2>{2,3}) ); - BOOST_CHECK( (e6 == extents<2>{2,3}) ); - BOOST_CHECK( (e7 == extents<2>{2,3}) ); - BOOST_CHECK( (e8 == extents<2>{2,3}) ); - - BOOST_CHECK( (e9 == extents<3>{4,2,3}) ); - BOOST_CHECK( (e10 == extents<3>{4,2,3}) ); - BOOST_CHECK( (e11 == extents<3>{4,2,3}) ); - BOOST_CHECK( (e12 == extents<3>{4,2,3}) ); - - BOOST_CHECK( (e13 == extents<2>{1,4}) ); - BOOST_CHECK( (e14 == extents<2>{1,1}) ); - BOOST_CHECK( (e15 == extents<2>{1,4}) ); - BOOST_CHECK( (e16 == extents<2>{2,1}) ); - BOOST_CHECK( (e17 == extents<2>{2,3}) ); - + namespace ublas = boost::numeric::ublas; + + +// auto e = de; + auto e11 = de11; + auto e12 = de12; + auto e21 = de21; + auto e23 = de23; + auto e231 = de231; + auto e123 = de123; + auto e1123 = de1123; + auto e12311 = de12311; + auto e423 = de423; + auto e4213 = de4213; + auto e42131 = de42131; + auto e142131 = de142131; + auto e141 = de141; + auto e1111 = de1111; + auto e14111 = de14111; + auto e112111 = de112111; + auto e112311 = de112311; + +// BOOST_CHECK( ublas::empty (e)); +// BOOST_CHECK( ! ublas::is_scalar(e)); +// BOOST_CHECK( ! ublas::is_vector(e)); +// BOOST_CHECK( ! ublas::is_matrix(e)); +// BOOST_CHECK( ! ublas::is_tensor(e)); + + BOOST_CHECK( ! ublas::empty (e11) ); + BOOST_CHECK( ublas::is_scalar(e11) ); + BOOST_CHECK( ublas::is_vector(e11) ); + BOOST_CHECK( ublas::is_matrix(e11) ); + BOOST_CHECK( ! ublas::is_tensor(e11) ); + + BOOST_CHECK( ! ublas::empty (e12) ); + BOOST_CHECK( ! ublas::is_scalar(e12) ); + BOOST_CHECK( ublas::is_vector(e12) ); + BOOST_CHECK( ublas::is_matrix(e12) ); + BOOST_CHECK( ! ublas::is_tensor(e12) ); + + BOOST_CHECK( ! ublas::empty (e21) ); + BOOST_CHECK( ! ublas::is_scalar(e21) ); + BOOST_CHECK( ublas::is_vector(e21) ); + BOOST_CHECK( ublas::is_matrix(e21) ); + BOOST_CHECK( ! ublas::is_tensor(e21) ); + + BOOST_CHECK( ! ublas::empty (e23) ); + BOOST_CHECK( ! ublas::is_scalar(e23) ); + BOOST_CHECK( ! ublas::is_vector(e23) ); + BOOST_CHECK( ublas::is_matrix(e23) ); + BOOST_CHECK( ! ublas::is_tensor(e23) ); + + BOOST_CHECK( ! ublas::empty (e231) ); + BOOST_CHECK( ! ublas::is_scalar(e231) ); + BOOST_CHECK( ! ublas::is_vector(e231) ); + BOOST_CHECK( ublas::is_matrix(e231) ); + BOOST_CHECK( ! ublas::is_tensor(e231) ); + + BOOST_CHECK( ! ublas::empty (e123) ); + BOOST_CHECK( ! ublas::is_scalar(e123) ); + BOOST_CHECK( ! 
ublas::is_vector(e123) ); + BOOST_CHECK( ! ublas::is_matrix(e123) ); + BOOST_CHECK( ublas::is_tensor(e123) ); + + BOOST_CHECK( ! ublas::empty (e1123) ); + BOOST_CHECK( ! ublas::is_scalar(e1123) ); + BOOST_CHECK( ! ublas::is_vector(e1123) ); + BOOST_CHECK( ! ublas::is_matrix(e1123) ); + BOOST_CHECK( ublas::is_tensor(e1123) ); + + BOOST_CHECK( ! ublas::empty (e12311) ); + BOOST_CHECK( ! ublas::is_scalar(e12311) ); + BOOST_CHECK( ! ublas::is_vector(e12311) ); + BOOST_CHECK( ! ublas::is_matrix(e12311) ); + BOOST_CHECK( ublas::is_tensor(e12311) ); + + BOOST_CHECK( ! ublas::empty (e423) ); + BOOST_CHECK( ! ublas::is_scalar(e423) ); + BOOST_CHECK( ! ublas::is_vector(e423) ); + BOOST_CHECK( ! ublas::is_matrix(e423) ); + BOOST_CHECK( ublas::is_tensor(e423) ); + + BOOST_CHECK( ! ublas::empty (e4213) ); + BOOST_CHECK( ! ublas::is_scalar(e4213) ); + BOOST_CHECK( ! ublas::is_vector(e4213) ); + BOOST_CHECK( ! ublas::is_matrix(e4213) ); + BOOST_CHECK( ublas::is_tensor(e4213) ); + + BOOST_CHECK( ! ublas::empty (e42131) ); + BOOST_CHECK( ! ublas::is_scalar(e42131) ); + BOOST_CHECK( ! ublas::is_vector(e42131) ); + BOOST_CHECK( ! ublas::is_matrix(e42131) ); + BOOST_CHECK( ublas::is_tensor(e42131) ); + + BOOST_CHECK( ! ublas::empty (e142131) ); + BOOST_CHECK( ! ublas::is_scalar(e142131) ); + BOOST_CHECK( ! ublas::is_vector(e142131) ); + BOOST_CHECK( ! ublas::is_matrix(e142131) ); + BOOST_CHECK( ublas::is_tensor(e142131) ); + + BOOST_CHECK( ! ublas::empty (e141) ); + BOOST_CHECK( ! ublas::is_scalar(e141) ); + BOOST_CHECK( ublas::is_vector(e141) ); + BOOST_CHECK( ublas::is_matrix(e141) ); + BOOST_CHECK( ! ublas::is_tensor(e141) ); + + BOOST_CHECK( ! ublas::empty (e1111) ); + BOOST_CHECK( ublas::is_scalar(e1111) ); + BOOST_CHECK( ublas::is_vector(e1111) ); + BOOST_CHECK( ublas::is_matrix(e1111) ); + BOOST_CHECK( ! ublas::is_tensor(e1111) ); + + BOOST_CHECK( ! ublas::empty (e14111) ); + BOOST_CHECK( ! ublas::is_scalar(e14111) ); + BOOST_CHECK( ublas::is_vector(e14111) ); + BOOST_CHECK( ublas::is_matrix(e14111) ); + BOOST_CHECK( ! ublas::is_tensor(e14111) ); + + BOOST_CHECK( ! ublas::empty (e112111) ); + BOOST_CHECK( ! ublas::is_scalar(e112111) ); + BOOST_CHECK( ! ublas::is_vector(e112111) ); + BOOST_CHECK( ! ublas::is_matrix(e112111) ); + BOOST_CHECK( ublas::is_tensor(e112111) ); + + BOOST_CHECK( ! ublas::empty (e112311) ); + BOOST_CHECK( ! ublas::is_scalar(e112311) ); + BOOST_CHECK( ! ublas::is_vector(e112311) ); + BOOST_CHECK( ! 
ublas::is_matrix(e112311) ); + BOOST_CHECK( ublas::is_tensor(e112311) ); } - -BOOST_FIXTURE_TEST_CASE(test_fixed_rank_extents_product, fixture, *boost::unit_test::label("basic_fixed_rank_extents") *boost::unit_test::label("product")) +//BOOST_FIXTURE_TEST_CASE(test_extents_static_size_squeeze, fixture, *boost::unit_test::label("basic_fixed_rank_extents") *boost::unit_test::label("squeeze")) +//{ +// auto e1 = squeeze(de1); // {1,1} +// auto e2 = squeeze(de2); // {1,2} +// auto 21 = squeeze(d21); // {2,1} + +// auto e4 = squeeze(de4); // {2,3} +// auto e231 = squeeze(de231); // {2,3} +// auto e123 = squeeze(de123); // {2,3} +// auto e1123 = squeeze(de1123); // {2,3} +// auto e12311 = squeeze(de12311); // {2,3} + +// auto e423 = squeeze(de423); // {4,2,3} +// auto e4213 = squeeze(de4213); // {4,2,3} +// auto e11 = squeeze(de11); // {4,2,3} +// auto e12 = squeeze(e142131); // {4,2,3} + +// auto e141 = squeeze(de141); // {1,4} +// auto e1111 = squeeze(de1111); // {1,1} +// auto e14111 = squeeze(de14111); // {1,4} +// auto e112111 = squeeze(de112111); // {2,1} +// auto e112311 = squeeze(de112311); // {2,3} + +// BOOST_CHECK( (e1 == extents<2>{1,1}) ); +// BOOST_CHECK( (e2 == extents<2>{1,2}) ); +// BOOST_CHECK( (21 == extents<2>{2,1}) ); + +// BOOST_CHECK( (e4 == extents<2>{2,3}) ); +// BOOST_CHECK( (e231 == extents<2>{2,3}) ); +// BOOST_CHECK( (e123 == extents<2>{2,3}) ); +// BOOST_CHECK( (e1123 == extents<2>{2,3}) ); +// BOOST_CHECK( (e12311 == extents<2>{2,3}) ); + +// BOOST_CHECK( (e423 == extents<3>{4,2,3}) ); +// BOOST_CHECK( (e4213 == extents<3>{4,2,3}) ); +// BOOST_CHECK( (e11 == extents<3>{4,2,3}) ); +// BOOST_CHECK( (e12 == extents<3>{4,2,3}) ); + +// BOOST_CHECK( (e141 == extents<2>{1,4}) ); +// BOOST_CHECK( (e1111 == extents<2>{1,1}) ); +// BOOST_CHECK( (e14111 == extents<2>{1,4}) ); +// BOOST_CHECK( (e112111 == extents<2>{2,1}) ); +// BOOST_CHECK( (e112311 == extents<2>{2,3}) ); + +//} + + +BOOST_FIXTURE_TEST_CASE(test_extents_static_size_product, fixture, *boost::unit_test::label("basic_fixed_rank_extents") *boost::unit_test::label("product")) { - - auto e0 = product( de0 ); // {} - auto e1 = product( de1 ); // {1,1} - auto e2 = product( de2 ); // {1,2} - auto e3 = product( de3 ); // {2,1} - auto e4 = product( de4 ); // {2,3} - auto e5 = product( de5 ); // {2,3,1} - auto e6 = product( de6 ); // {1,2,3} - auto e7 = product( de7 ); // {1,1,2,3} - auto e8 = product( de8 ); // {1,2,3,1,1} - auto e9 = product( de9 ); // {4,2,3} - auto e10 = product( de10 ); // {4,2,1,3} - auto e11 = product( de11 ); // {4,2,1,3,1} - auto e12 = product( de12 ); // {1,4,2,1,3,1} - auto e13 = product( de13 ); // {1,4,1} - auto e14 = product( de14 ); // {1,1,1,1} - auto e15 = product( de15 ); // {1,4,1,1,1} - auto e16 = product( de16 ); // {1,1,2,1,1,1} - auto e17 = product( de17 ); // {1,1,2,3,1,1} - - BOOST_CHECK_EQUAL( e0 , 0 ); - BOOST_CHECK_EQUAL( e1 , 1 ); - BOOST_CHECK_EQUAL( e2 , 2 ); - BOOST_CHECK_EQUAL( e3 , 2 ); - BOOST_CHECK_EQUAL( e4 , 6 ); - BOOST_CHECK_EQUAL( e5 , 6 ); - BOOST_CHECK_EQUAL( e6 , 6 ); - BOOST_CHECK_EQUAL( e7 , 6 ); - BOOST_CHECK_EQUAL( e8 , 6 ); - BOOST_CHECK_EQUAL( e9 , 24 ); - BOOST_CHECK_EQUAL( e10, 24 ); - BOOST_CHECK_EQUAL( e11, 24 ); - BOOST_CHECK_EQUAL( e12, 24 ); - BOOST_CHECK_EQUAL( e13, 4 ); - BOOST_CHECK_EQUAL( e14, 1 ); - BOOST_CHECK_EQUAL( e15, 4 ); - BOOST_CHECK_EQUAL( e16, 2 ); - BOOST_CHECK_EQUAL( e17, 6 ); + namespace ublas = boost::numeric::ublas; + +// auto e = ublas::product( de ); + auto e11 = ublas::product( de11 ); + auto e12 = ublas::product( de12 ); + 
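The query and product checks encode the new classification rules in concrete cases: a shape of all ones counts as scalar, vector and matrix at the same time, {4,2,3} is only a tensor, and product multiplies all dimensions. A compact sketch of exactly those cases, not taken from the patch, using only the free functions exercised here (the helper name is illustrative).

#include <boost/numeric/ublas/tensor.hpp>

namespace ublas = boost::numeric::ublas;

void sketch_extents_queries()
{
  auto e11  = ublas::extents<2>{1,1};
  auto e423 = ublas::extents<3>{4,2,3};

  // {1,1} is scalar, vector and matrix at once, but not a tensor:
  bool b1 = ublas::is_scalar(e11) && ublas::is_vector(e11) &&
            ublas::is_matrix(e11) && !ublas::is_tensor(e11);

  // {4,2,3} is only a tensor, and product multiplies all dimensions:
  bool b2 = ublas::is_tensor(e423) && !ublas::is_matrix(e423) &&
            ublas::product(e423) == 24u;

  (void)b1; (void)b2;
}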
auto e21 = ublas::product( de21 ); + auto e23 = ublas::product( de23 ); + auto e231 = ublas::product( de231 ); + auto e123 = ublas::product( de123 ); + auto e1123 = ublas::product( de1123 ); + auto e12311 = ublas::product( de12311 ); + auto e423 = ublas::product( de423 ); + auto e4213 = ublas::product( de4213 ); + auto e42131 = ublas::product( de42131 ); + auto e142131 = ublas::product( de142131 ); + auto e141 = ublas::product( de141 ); + auto e1111 = ublas::product( de1111 ); + auto e14111 = ublas::product( de14111 ); + auto e112111 = ublas::product( de112111 ); + auto e112311 = ublas::product( de112311 ); + +// BOOST_CHECK_EQUAL( e , 0 ); + BOOST_CHECK_EQUAL( e11 , 1 ); + BOOST_CHECK_EQUAL( e12 , 2 ); + BOOST_CHECK_EQUAL( e21 , 2 ); + BOOST_CHECK_EQUAL( e23 , 6 ); + BOOST_CHECK_EQUAL( e231 , 6 ); + BOOST_CHECK_EQUAL( e123 , 6 ); + BOOST_CHECK_EQUAL( e1123 , 6 ); + BOOST_CHECK_EQUAL( e12311 , 6 ); + BOOST_CHECK_EQUAL( e423 , 24 ); + BOOST_CHECK_EQUAL( e4213 , 24 ); + BOOST_CHECK_EQUAL( e42131 , 24 ); + BOOST_CHECK_EQUAL( e142131, 24 ); + BOOST_CHECK_EQUAL( e141 , 4 ); + BOOST_CHECK_EQUAL( e1111 , 1 ); + BOOST_CHECK_EQUAL( e14111 , 4 ); + BOOST_CHECK_EQUAL( e112111, 2 ); + BOOST_CHECK_EQUAL( e112311, 6 ); } diff --git a/test/tensor/test_fixed_rank_functions.cpp b/test/tensor/test_fixed_rank_functions.cpp index 8ccea3f76..df3d4ce80 100644 --- a/test/tensor/test_fixed_rank_functions.cpp +++ b/test/tensor/test_fixed_rank_functions.cpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at @@ -23,7 +23,7 @@ #include "utility.hpp" // BOOST_AUTO_TEST_SUITE ( test_tensor_functions, * boost::unit_test::depends_on("test_tensor_contraction") ) -BOOST_AUTO_TEST_SUITE ( test_fixed_rank_tensor_functions) +BOOST_AUTO_TEST_SUITE ( test_tensor_extents_static_size_functions) using test_types = zip>::with_t; @@ -33,372 +33,427 @@ using test_types = zip>::with_t - using fixed_rank_extents_type = boost::numeric::ublas::extents; - - using dynamic_extents_type = boost::numeric::ublas::extents<>; - fixture() - : extents { - dynamic_extents_type{1,1}, // 1 - dynamic_extents_type{2,3}, // 2 - dynamic_extents_type{2,3,1}, // 3 - dynamic_extents_type{4,2,3}, // 4 - dynamic_extents_type{4,2,3,5}} // 5 - { - } - - std::tuple< - fixed_rank_extents_type<2>, - fixed_rank_extents_type<2>, - fixed_rank_extents_type<3>, - fixed_rank_extents_type<3>, - fixed_rank_extents_type<4> - > fixed_rank_extents{ - {1,1}, // 1 - {2,3}, // 2 - {2,3,1}, // 3 - {4,2,3}, // 4 - {4,2,3,5} // 5 + std::tuple< + boost::numeric::ublas::extents<2>, + boost::numeric::ublas::extents<2>, + boost::numeric::ublas::extents<3>, + boost::numeric::ublas::extents<3>, + boost::numeric::ublas::extents<4> + > extents_tuple{ + {1,1}, // 1 + {2,3}, // 2 + {2,3,1}, // 3 + {4,2,3}, // 4 + {4,2,3,5} // 5 }; - std::vector extents; + std::vector> extents_vector = + { + {1,1}, // 1 + {2,3}, // 2 + {2,3,1}, // 3 + {4,2,3}, // 4 + {4,2,3,5} // 5 + }; }; -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_fixed_rank_tensor_prod_vector, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_extents_static_size_prod_vector, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - - for_each_tuple(fixed_rank_extents,[](auto const&, auto & n){ - using extents_type = typename std::decay::type; - using tensor_type = ublas::fixed_rank_tensor; - using vector_type = typename tensor_type::vector_type; - auto a = tensor_type(n, value_type{2}); - - for (auto m = 0u; m < n.size(); ++m) { - auto b = vector_type(n[m], value_type{1}); - - auto c = ublas::prod(a, b, m + 1); - - for (auto i = 0u; i < c.size(); ++i) - BOOST_CHECK_EQUAL(c[i], value_type( static_cast< inner_type_t >(n[m]) ) * a[i]); - } - }); + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + + for_each_in_tuple(extents_tuple,[](auto const& /*unused*/, auto const& n){ + + constexpr auto size = std::tuple_size_v>; + using tensor_t = ublas::tensor_static_rank; + using vector_t = typename tensor_t::vector_type; + auto a = tensor_t(n); + a = 2; + + for (auto m = 0u; m < ublas::size(n); ++m) { + auto b = vector_t(n[m], value_t{1}); + + auto c = ublas::prod(a, b, m + 1); + + for (auto i = 0u; i < c.size(); ++i) + BOOST_CHECK_EQUAL(c[i], value_t( static_cast< inner_type_t >(n[m]) ) * a[i]); + } + }); } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_fixed_rank_tensor_prod_matrix, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_extents_static_size_prod_matrix, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - - for_each_tuple(fixed_rank_extents,[](auto const&, auto & n){ - using extents_type = typename std::decay::type; - using tensor_type = ublas::fixed_rank_tensor; - using matrix_type = typename tensor_type::matrix_type; 
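Both mode-m product tests rely on the same closed form: with a tensor filled with 2 and an all-ones vector (or all-ones n[m]-by-n[m] matrix) contracted over mode m, every output entry equals n[m] * 2, because the contraction just sums the constant over that mode. A sketch with concrete numbers, not taken from the patch; tensor_static_rank<double, 3> with its default layout, the shape and the helper name are assumptions.

#include <boost/numeric/ublas/tensor.hpp>

namespace ublas = boost::numeric::ublas;

void sketch_mode_product()
{
  using tensor3 = ublas::tensor_static_rank<double, 3>;

  auto a = tensor3(ublas::extents<3>{4,2,3});
  a = 2.0;                                  // fill every entry with 2, as the tests do

  auto b = tensor3::vector_type(2, 1.0);    // mode 2 has length n[1] == 2
  auto c = ublas::prod(a, b, 2);            // contract over mode 2 (modes are 1-based)

  // every entry of c equals n[1] * 2 == 4, which is what the loop above verifies
  (void)c;
}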
- auto a = tensor_type(n, value_type{2}); - for (auto m = 0u; m < n.size(); ++m) { - - auto b = matrix_type ( n[m], n[m], value_type{1} ); - - auto c = ublas::prod(a, b, m + 1); - - for (auto i = 0u; i < c.size(); ++i) - BOOST_CHECK_EQUAL(c[i], value_type( static_cast< inner_type_t >(n[m]) ) * a[i]); - } - }); + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + + for_each_in_tuple(extents_tuple,[](auto const& /*unused*/, auto const & n){ + constexpr auto size = std::tuple_size_v>; + using tensor_t = ublas::tensor_static_rank; + using matrix_t = typename tensor_t::matrix_type; + + auto a = tensor_t(n); + a = 2; + for (auto m = 0u; m < ublas::size(n); ++m) { + + auto b = matrix_t ( n[m], n[m], value_t{1} ); + auto c = ublas::prod(a, b, m + 1); + + for (auto i = 0u; i < c.size(); ++i){ + BOOST_CHECK_EQUAL(c[i], value_t( static_cast< inner_type_t >(n[m]) ) * a[i]); + } + } + }); } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_fixed_rank_tensor_prod_tensor_1, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_extents_static_size_prod_tensor_1, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - - auto const body = [](auto const& a, auto const& b){ - auto const pa = a.rank(); - - for (auto q = 0ul; q <= pa; ++q) { - - auto phi = std::vector(q); - - std::iota(phi.begin(), phi.end(), 1ul); - - auto c = ublas::prod(a, b, phi); - - auto acc = value_type(1); - for (auto i = 0ul; i < q; ++i) - acc *= value_type( static_cast< inner_type_t >( a.extents().at(phi.at(i) - 1) ) ); - - for (auto i = 0ul; i < c.size(); ++i) - BOOST_CHECK_EQUAL(c[i], acc *a[0] * b[0]); - } - }; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + auto check = [&](auto const& a, auto const& b, std::index_sequence /*unused*/) + { + namespace ublas = boost::numeric::ublas; - for_each_tuple(fixed_rank_extents,[&](auto const&, auto & n){ - auto n1 = n; - auto n2 = n; - using extents_type_1 = typename std::decay::type; - using extents_type_2 = typename std::decay::type; - using tensor_type_1 = - ublas::fixed_rank_tensor; - using tensor_type_2 = - ublas::fixed_rank_tensor; - auto a = tensor_type_1(n1, value_type{2}); - auto b = tensor_type_2(n2, value_type{3}); - body(a,b); - }); + constexpr auto q = sizeof...(qs); - for_each_tuple(fixed_rank_extents,[&](auto const& I, auto & n){ - auto n1 = n; - auto n2 = extents[I]; - using extents_type_1 = typename std::decay::type; - using tensor_type_1 = - ublas::fixed_rank_tensor; - using tensor_type_2 = - ublas::dynamic_tensor; - auto a = tensor_type_1(n1, value_type{2}); - auto b = tensor_type_2(n2, value_type{3}); - body(a,b); - }); + using tensorA = std::decay_t; + using tensorB = std::decay_t; - for_each_tuple(fixed_rank_extents,[&](auto const& I, auto & n){ - auto n1 = extents[I]; - auto n2 = n; - using extents_type_2 = typename std::decay::type; - using tensor_type_1 = - ublas::dynamic_tensor; - using tensor_type_2 = - ublas::fixed_rank_tensor; - auto a = tensor_type_1(n1, value_type{2}); - auto b = tensor_type_2(n2, value_type{3}); - body(a,b); - }); + using extentsA = typename tensorA::extents_type; + using extentsB = typename tensorB::extents_type; -} + static_assert(!ublas::is_static_v && !ublas::is_static_v ); -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_fixed_rank_tensor_prod_tensor_2, 
value, test_types, fixture ) -{ - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; + constexpr auto one_of_extents_is_resizable = ublas::is_dynamic_rank_v || + ublas::is_dynamic_rank_v; + using phi_type = std::conditional_t, + std::array >; + auto phi = phi_type{}; + if constexpr(std::is_same_v>){ + phi.resize(q); + } + std::iota(phi.begin(), phi.end(), std::size_t{1}); + auto c = ublas::prod(a, b, phi); - auto compute_factorial = [](auto const& p){ - auto f = 1ul; - for(auto i = 1u; i <= p; ++i) - f *= i; - return f; - }; + auto const& na = a.extents(); + auto acc = std::size_t{1}; + for (auto i = 0ul; i < q; ++i){ + acc *= na.at(phi.at(i)-1); + } + const auto v = value_t(acc) * a[0] * b[0]; + BOOST_CHECK( std::all_of(c.begin(),c.end(),[v](auto cc){ return cc == v;})); + }; + + + for_each_in_tuple(extents_tuple,[&](auto const& /*I*/, auto const& n){ + constexpr auto size = std::tuple_size_v>; + constexpr auto modes = std::make_index_sequence{}; + using tensor_t = ublas::tensor_static_rank; + auto a = tensor_t(n); + auto b = tensor_t(n); + a = 2; + b = 3; + for_each_in_index(modes, a,b, check ); + }); + + for_each_in_tuple(extents_tuple,[&](auto const& I, auto const& n){ + auto const& nA = n; + auto const& nB = extents_vector[I]; + constexpr auto sizeA = std::tuple_size_v>; + constexpr auto modes = std::make_index_sequence{}; + using tensorA_type = ublas::tensor_static_rank; + using tensorB_type = ublas::tensor_dynamic; + auto a = tensorA_type(nA); + auto b = tensorB_type(nB); + a = 2; + b = 3; + + for_each_in_index(modes, a,b, check ); + }); + + for_each_in_tuple(extents_tuple,[&](auto const& I, auto const& n){ + auto const& nA = extents_vector[I]; + auto const& nB = n; + constexpr auto sizeB = std::tuple_size_v>; + constexpr auto modes = std::make_index_sequence{}; + using tensor_t_1 = ublas::tensor_dynamic; + using tensor_t_2 = ublas::tensor_static_rank; + auto a = tensor_t_1(nA); + auto b = tensor_t_2(nB); + a = 2; + b = 3; + for_each_in_index(modes, a,b, check ); + + }); +} - auto permute_extents_s_1 = [](auto const& pi, auto const& na){ +// TODO: +#if 0 - auto nb = ublas::extents<>(na); - assert(pi.size() == na.size()); - for(auto j = 0u; j < pi.size(); ++j) - nb[pi[j]-1] = na[j]; - return nb; - }; - auto permute_extents_s_2 = [](auto const& pi, auto const& na){ - auto tempn = na.base(); - assert(pi.size() == na.size()); - for(auto j = 0u; j < pi.size(); ++j) - tempn[pi[j]-1] = na[j]; - return ublas::extents::type::_size>(tempn.begin(),tempn.end()); - }; +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_extents_static_size_prod_tensor_2, value, test_types, fixture ) +{ + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + + constexpr auto to_array = [](std::index_sequence/*unused*/) { + return std::array{is...}; + }; + + auto compute_factorial = [](std::index_sequence/*unused*/) { + return ( 1 * ... 
* is ); + }; + /* + auto compute_factorial = [](auto const& p){ + auto f = 1ul; + for(auto i = 1u; i <= p; ++i) + f *= i; + return f; + }; +*/ + auto permute_extents_dynamic_rank = [](auto const& pi, auto const& na){ + auto nb = ublas::extents<>(na.begin(),na.end()); + assert(std::size(pi) == ublas::size(na)); + for(auto j = 0u; j < std::size(pi); ++j) + nb[pi[j]-1] = na[j]; + return nb; + }; + + auto permute_extents_static_rank = [](std::array const& pi, auto const& na){ + //constexpr auto size = std::tuple_size_v>; + auto na_base = na.base(); + assert(std::size(pi) == size); + for(auto j = 0u; j < std::size(pi); ++j) + na_base[pi[j]-1] = na[j]; + return ublas::extents(na_base.begin(),na_base.end()); + }; + + for_each_in_tuple(extents_tuple,[&](auto const& /*unused*/, auto const& n){ + auto const& na = n; + constexpr auto size = std::tuple_size_v>; + using tensorA_type = ublas::tensor_static_rank; + auto a = tensorA_type(na); + a = 2; + assert(a.rank() == size); + // auto const pa = a.rank(); + auto pi = to_array(std::make_index_sequence{}); + constexpr auto factorial = compute_factorial(std::make_index_sequence{}); + // auto pi = std::vector(pa); + // auto fac = compute_factorial(pa); + // std::iota(pi.begin(), pi.end(), 1); + + constexpr auto factorials = std::make_index_sequence{}; + + // for_each_in_tuple(factorials,[&](auto const& /*unused*/, auto const& /*unused*/){ + // using tensorB_type = ublas::tensor_dynamic; + // const auto nb = permute_extents_dynamic_rank(pi, na); + // const auto b = tensorB_type(nb, value_t{3}); + + // constexpr auto modes = std::make_index_sequence{}; + + // for_each_in_tuple(modes,[&](auto const& /*unused*/, auto const& /*unused*/){ + + + // const auto phia = to_array(std::make_index_sequence); + // const auto phib = std::array(q); + + // }); + + // for (auto f = 0ul; f < fac; ++f) { + // for (auto q = 0ul; q <= pa; ++q) { + + // auto phia = std::vector(q); + // auto phib = std::vector(q); + + // std::iota(phia.begin(), phia.end(), 1ul); + // std::transform(phia.begin(), phia.end(), phib.begin(), + // [&pi](std::size_t i) { return pi.at(i - 1); }); + + // auto c = ublas::prod(a, b, phia, phib); + + // auto acc = value_t(1); + // for (auto i = 0ul; i < q; ++i) + // acc *= value_t( static_cast< inner_type_t >( a.extents().at(phia.at(i) - 1) ) ); + + // for (auto i = 0ul; i < c.size(); ++i) + // BOOST_CHECK_EQUAL(c[i], acc *a[0] * b[0]); + // } + + // std::next_permutation(pi.begin(), pi.end()); + // } + }); + + for_each_in_tuple(extents_tuple,[&](auto const& /*unused*/, auto & /*n*/){ + // auto const& na = n; + // constexpr auto size = std::tuple_size_v>; + // using tensor_t_1 = ublas::tensor_static_rank; + // auto a = tensor_t_1(na, value_t{2}); + // auto const pa = a.rank(); + + // auto pi = std::vector(pa); + // auto fac = compute_factorial(pa); + // std::iota(pi.begin(), pi.end(), 1); + + // for (auto f = 0ul; f < fac; ++f) { + // auto nb = permute_extents_static_rank(pi, na); + + // using tensor_t_2 = ublas::tensor_static_rank; + // auto b = tensor_t_2(nb, value_t{3}); + + // for (auto q = 0ul; q <= pa; ++q) { + + // auto phia = std::vector(q); + // auto phib = std::vector(q); + + // std::iota(phia.begin(), phia.end(), 1ul); + // std::transform(phia.begin(), phia.end(), phib.begin(), + // [&pi](std::size_t i) { return pi.at(i - 1); }); + + // auto c = ublas::prod(a, b, phia, phib); - for_each_tuple(fixed_rank_extents,[&](auto const&, auto & n){ - auto na = n; - using extents_type_1 = typename std::decay::type; - using tensor_type_1 = 
ublas::fixed_rank_tensor; - auto a = tensor_type_1(na, value_type{2}); - auto const pa = a.rank(); - - auto pi = std::vector(pa); - auto fac = compute_factorial(pa); - std::iota(pi.begin(), pi.end(), 1); - - for (auto f = 0ul; f < fac; ++f) { - auto nb = permute_extents_s_1(pi, na); - using tensor_type_2 = ublas::dynamic_tensor; - auto b = tensor_type_2(nb, value_type{3}); - - for (auto q = 0ul; q <= pa; ++q) { - - auto phia = std::vector(q); - auto phib = std::vector(q); - - std::iota(phia.begin(), phia.end(), 1ul); - std::transform(phia.begin(), phia.end(), phib.begin(), - [&pi](std::size_t i) { return pi.at(i - 1); }); - - auto c = ublas::prod(a, b, phia, phib); - - auto acc = value_type(1); - for (auto i = 0ul; i < q; ++i) - acc *= value_type( static_cast< inner_type_t >( a.extents().at(phia.at(i) - 1) ) ); - - for (auto i = 0ul; i < c.size(); ++i) - BOOST_CHECK_EQUAL(c[i], acc *a[0] * b[0]); - } - - std::next_permutation(pi.begin(), pi.end()); - } - }); + // auto acc = value_t(1); + // for (auto i = 0ul; i < q; ++i){ + // acc *= value_t( static_cast< inner_type_t >( a.extents().at(phia.at(i) - 1) ) ); + // } - for_each_tuple(fixed_rank_extents,[&](auto const&, auto & n){ - auto na = n; - using extents_type_1 = typename std::decay::type; - using tensor_type_1 = ublas::fixed_rank_tensor; - auto a = tensor_type_1(na, value_type{2}); - auto const pa = a.rank(); - - auto pi = std::vector(pa); - auto fac = compute_factorial(pa); - std::iota(pi.begin(), pi.end(), 1); - - for (auto f = 0ul; f < fac; ++f) { - auto nb = permute_extents_s_2(pi, na); - - using extents_type_2 = typename std::decay::type; - using tensor_type_2 = ublas::fixed_rank_tensor; - auto b = tensor_type_2(nb, value_type{3}); - - for (auto q = 0ul; q <= pa; ++q) { - - auto phia = std::vector(q); - auto phib = std::vector(q); - - std::iota(phia.begin(), phia.end(), 1ul); - std::transform(phia.begin(), phia.end(), phib.begin(), - [&pi](std::size_t i) { return pi.at(i - 1); }); - - auto c = ublas::prod(a, b, phia, phib); - - auto acc = value_type(1); - for (auto i = 0ul; i < q; ++i) - acc *= value_type( static_cast< inner_type_t >( a.extents().at(phia.at(i) - 1) ) ); - - for (auto i = 0ul; i < c.size(); ++i) - BOOST_CHECK_EQUAL(c[i], acc *a[0] * b[0]); - } - - std::next_permutation(pi.begin(), pi.end()); - } - }); + // for (auto i = 0ul; i < c.size(); ++i) + // BOOST_CHECK_EQUAL(c[i], acc *a[0] * b[0]); + // } -} + // std::next_permutation(pi.begin(), pi.end()); + // } + }); +} +#endif -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_fixed_rank_tensor_inner_prod, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_extents_static_size_inner_prod, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; - auto const body = [&](auto const& a, auto const& b){ - auto c = ublas::inner_prod(a, b); - auto r = std::inner_product(a.begin(),a.end(), b.begin(),value_type(0)); - BOOST_CHECK_EQUAL( c , r ); - }; + using dtensor_t = ublas::tensor_dynamic; - for_each_tuple(fixed_rank_extents,[&](auto const&, auto & n){ - using extents_type_1 = typename std::decay::type; - using extents_type_2 = typename std::decay::type; - using tensor_type_1 = ublas::fixed_rank_tensor; - using tensor_type_2 = ublas::fixed_rank_tensor; - auto a = tensor_type_1(n, value_type(2)); - auto b = tensor_type_2(n, 
value_type(1)); - body(a,b); + auto const body = [&](auto const& a, auto const& b){ + auto c = ublas::inner_prod(a, b); + auto r = std::inner_product(a.begin(),a.end(), b.begin(),value_t(0)); + BOOST_CHECK_EQUAL( c , r ); + }; - }); + for_each_in_tuple(extents_tuple,[&](auto const& /*unused*/, auto & n){ + constexpr auto size = std::tuple_size_v>; + using stensor_t = ublas::tensor_static_rank; + auto a = stensor_t(n); + auto b = stensor_t(n); + a = 2; + b = 3; + body(a,b); - for_each_tuple(fixed_rank_extents,[&](auto const& I, auto & n){ - using extents_type_1 = typename std::decay::type; - using tensor_type_1 = ublas::fixed_rank_tensor; - using tensor_type_2 = ublas::dynamic_tensor; - auto a = tensor_type_1(n, value_type(2)); - auto b = tensor_type_2(extents[I], value_type(1)); - body(a,b); + }); - }); + for_each_in_tuple(extents_tuple,[&](auto const& I, auto & n){ + constexpr auto size = std::tuple_size_v>; + using stensor_t = ublas::tensor_static_rank; + auto a = stensor_t(n); + auto b = dtensor_t(extents_vector[I]); + a = 2; + b = 1; - for_each_tuple(fixed_rank_extents,[&](auto const& I, auto & n){ - using extents_type_2 = typename std::decay::type; - using tensor_type_1 = ublas::dynamic_tensor; - using tensor_type_2 = ublas::fixed_rank_tensor; - auto a = tensor_type_1(extents[I], value_type(2)); - auto b = tensor_type_2(n, value_type(1)); - body(a,b); + body(a,b); - }); + }); + + for_each_in_tuple(extents_tuple,[&](auto const& I, auto & n){ + constexpr auto size = std::tuple_size_v>; + using stensor_t = ublas::tensor_static_rank; + auto a = dtensor_t(extents_vector[I]); + auto b = stensor_t(n); + a = 2; + b = 1; + body(a,b); + + }); } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_fixed_rank_tensor_outer_prod, value, test_types, fixture ) + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_extents_static_size_outer_prod, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - - for_each_tuple(fixed_rank_extents,[&](auto const&, auto const& n1){ - using extents_type_1 = typename std::decay::type; - using tensor_type_1 = ublas::fixed_rank_tensor; - auto a = tensor_type_1(n1, value_type(2)); - for_each_tuple(fixed_rank_extents,[&](auto const& /*J*/, auto const& n2){ - using extents_type_2 = typename std::decay::type; - using tensor_type_2 = ublas::fixed_rank_tensor; - auto b = tensor_type_2(n2, value_type(1)); - auto c = ublas::outer_prod(a, b); - - for(auto const& cc : c) - BOOST_CHECK_EQUAL( cc , a[0]*b[0] ); - - }); + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + + for_each_in_tuple(extents_tuple,[&](auto const& /*unused*/, auto const& n1){ + constexpr auto size1 = std::tuple_size_v>; + using tensor_t_1 = ublas::tensor_static_rank; + auto a = tensor_t_1(n1); + a = 2; + for_each_in_tuple(extents_tuple,[&](auto const& /*J*/, auto const& n2){ + constexpr auto size2 = std::tuple_size_v>; + using tensor_t_2 = ublas::tensor_static_rank; + auto b = tensor_t_2(n2); + b = 1; + auto c = ublas::outer_prod(a, b); + + BOOST_CHECK ( std::all_of(c.begin(),c.end(), [&a,&b](auto cc){return cc == a[0]*b[0];}) ); }); - for_each_tuple(fixed_rank_extents,[&](auto const& I, auto const& /*n1*/){ - using tensor_type_1 = ublas::dynamic_tensor; - auto a = tensor_type_1(extents[I], value_type(2)); - for_each_tuple(fixed_rank_extents,[&](auto const& /*J*/, auto const& n2){ - using extents_type_2 = typename std::decay::type; 
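The inner and outer product checks also reduce to constants: with both operands filled with fixed values, inner_prod sums the element-wise products and every entry of the outer product equals a[0]*b[0]. A small sketch, not taken from the patch; tensor_dynamic<double>, the shapes and the helper name are illustrative, and the remark about the outer product's shape is an assumption based on the usual definition.

#include <boost/numeric/ublas/tensor.hpp>

namespace ublas = boost::numeric::ublas;

void sketch_inner_outer_prod()
{
  auto a = ublas::tensor_dynamic<double>(ublas::extents<>{2,3});
  auto b = ublas::tensor_dynamic<double>(ublas::extents<>{2,3});
  a = 2.0;
  b = 3.0;

  auto s = ublas::inner_prod(a, b);   // 6 entries of 2*3 each, so s == 36
  auto c = ublas::outer_prod(a, b);   // every entry equals a[0]*b[0] == 6,
                                      // presumably with the extents concatenated ({2,3,2,3})
  (void)s; (void)c;
}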
- using tensor_type_2 = ublas::fixed_rank_tensor; - auto b = tensor_type_2(n2, value_type(1)); - auto c = ublas::outer_prod(a, b); + }); - for(auto const& cc : c) - BOOST_CHECK_EQUAL( cc , a[0]*b[0] ); - }); + for_each_in_tuple(extents_tuple,[&](auto const& I, auto const& /*n1*/){ + using tensor_t_1 = ublas::tensor_dynamic; + auto a = tensor_t_1(extents_vector[I]); + a = 2; + for_each_in_tuple(extents_tuple,[&](auto const& /*J*/, auto const& n2){ + constexpr auto size = std::tuple_size_v>; + using tensor_t_2 = ublas::tensor_static_rank; + auto b = tensor_t_2(n2); + b = 1; + auto c = ublas::outer_prod(a, b); + BOOST_CHECK ( std::all_of(c.begin(),c.end(), [&a,&b](auto cc){return cc == a[0]*b[0];}) ); + +// for(auto const& cc : c) +// BOOST_CHECK_EQUAL( cc , a[0]*b[0] ); }); - for_each_tuple(fixed_rank_extents,[&](auto const&, auto const& n1){ - using extents_type_1 = typename std::decay::type; - using tensor_type_1 = ublas::fixed_rank_tensor; - auto a = tensor_type_1(n1, value_type(2)); - for(auto n2 : extents){ - using tensor_type_2 = ublas::dynamic_tensor; - auto b = tensor_type_2(n2, value_type(1)); - auto c = ublas::outer_prod(a, b); + }); - for(auto const& cc : c) - BOOST_CHECK_EQUAL( cc , a[0]*b[0] ); - } + for_each_in_tuple(extents_tuple,[&](auto const& /*unused*/, auto const& n1){ + constexpr auto size = std::tuple_size_v>; + using tensor_t_1 = ublas::tensor_static_rank; + auto a = tensor_t_1(n1); + a = 2; + for(auto const& n2 : extents_vector){ + using tensor_t_2 = ublas::tensor_dynamic; + auto b = tensor_t_2(n2); + b = 1; + auto c = ublas::outer_prod(a, b); - }); + BOOST_CHECK ( std::all_of(c.begin(),c.end(), [&a,&b](auto cc){return cc == a[0]*b[0];}) ); + +// for(auto const& cc : c) +// BOOST_CHECK_EQUAL( cc , a[0]*b[0] ); + } + + }); } diff --git a/test/tensor/test_fixed_rank_operators_arithmetic.cpp b/test/tensor/test_fixed_rank_operators_arithmetic.cpp index e5bbcf86a..08ef6b8f9 100644 --- a/test/tensor/test_fixed_rank_operators_arithmetic.cpp +++ b/test/tensor/test_fixed_rank_operators_arithmetic.cpp @@ -18,7 +18,7 @@ #include #include "utility.hpp" -BOOST_AUTO_TEST_SUITE(test_fixed_rank_tensor_arithmetic_operations) +BOOST_AUTO_TEST_SUITE(test_tensor_static_rank_arithmetic_operations) using double_extended = boost::multiprecision::cpp_bin_float_double_extended; @@ -26,136 +26,131 @@ using test_types = zip::with_t - using extents_type = boost::numeric::ublas::extents; - - std::tuple< - extents_type<2>, // 1 - extents_type<2>, // 2 - extents_type<3>, // 3 - extents_type<3>, // 4 - extents_type<4> // 5 - > extents = { - extents_type<2>{1,1}, - extents_type<2>{2,3}, - extents_type<3>{4,1,3}, - extents_type<3>{4,2,3}, - extents_type<4>{4,2,3,5} - }; + template + using extents_t = boost::numeric::ublas::extents; + + std::tuple< + extents_t<2>, // 1 + extents_t<2>, // 2 + extents_t<3>, // 3 + extents_t<3>, // 4 + extents_t<4> // 5 + > extents = { + extents_t<2>{1,1}, + extents_t<2>{2,3}, + extents_t<3>{4,1,3}, + extents_t<3>{4,2,3}, + extents_t<4>{4,2,3,5} + }; }; BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_binary_arithmetic_operations, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; - auto check = [](auto const&, auto& e) - { - using extents_type = std::decay_t; - using tensor_type = ublas::fixed_rank_tensor; - auto t = tensor_type 
(e); - auto t2 = tensor_type (e); - auto r = tensor_type (e); - auto v = value_type {}; + auto check = [](auto const& /*unused*/, auto& e) + { + constexpr auto size = std::tuple_size_v>; + using tensor_t = ublas::tensor_static_rank; + auto t = tensor_t (e); + auto t2 = tensor_t (e); + auto r = tensor_t (e); + auto v = value_t {}; - std::iota(t.begin(), t.end(), v); - std::iota(t2.begin(), t2.end(), v+2); - r = t + t + t + t2; + std::iota(t.begin(), t.end(), v); + std::iota(t2.begin(), t2.end(), v+2); + r = t + t + t + t2; - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), 3*t(i) + t2(i) ); + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 3*t(i) + t2(i) ); - r = t2 / (t+3) * (t+1) - t2; // r = ( t2/ ((t+3)*(t+1)) ) - t2 + r = t2 / (t+3) * (t+1) - t2; // r = ( t2/ ((t+3)*(t+1)) ) - t2 - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), t2(i) / (t(i)+3)*(t(i)+1) - t2(i) ); + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), t2(i) / (t(i)+3)*(t(i)+1) - t2(i) ); - r = 3+t2 / (t+3) * (t+1) * t - t2; // r = 3+( t2/ ((t+3)*(t+1)*t) ) - t2 + r = 3+t2 / (t+3) * (t+1) * t - t2; // r = 3+( t2/ ((t+3)*(t+1)*t) ) - t2 - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), 3+t2(i) / (t(i)+3)*(t(i)+1)*t(i) - t2(i) ); + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 3+t2(i) / (t(i)+3)*(t(i)+1)*t(i) - t2(i) ); - r = t2 - t + t2 - t; + r = t2 - t + t2 - t; - for(auto i = 0ul; i < r.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), 4 ); + for(auto i = 0ul; i < r.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 4 ); - r = tensor_type (e,1) + tensor_type (e,1); + r = t * t * t * t2; - for(auto i = 0ul; i < r.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), 2 ); + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), t(i)*t(i)*t(i)*t2(i) ); - r = t * t * t * t2; + r = (t2/t2) * (t2/t2); - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), t(i)*t(i)*t(i)*t2(i) ); + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 1 ); + }; - r = (t2/t2) * (t2/t2); - - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), 1 ); - }; - - for_each_tuple(extents,check); + for_each_in_tuple(extents,check); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_unary_arithmetic_operations, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; - auto check = [](auto const&, auto& e) - { - using extents_type = std::decay_t; - using tensor_type = ublas::fixed_rank_tensor; - auto t = tensor_type (e); - auto t2 = tensor_type (e); - auto v = value_type {}; + auto check = [](auto const& /*unused*/, auto& e) + { + constexpr auto size = std::tuple_size_v>; + using tensor_t = ublas::tensor_static_rank; + auto t = tensor_t (e); + auto t2 = tensor_t (e); + auto v = value_t {}; - std::iota(t.begin(), t.end(), v); - std::iota(t2.begin(), t2.end(), v+2); + std::iota(t.begin(), t.end(), v); + std::iota(t2.begin(), t2.end(), v+2); - tensor_type r1 = t + 2 + t + 2; + tensor_t r1 = t + 2 + t + 2; - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r1(i), 2*t(i) + 4 ); + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r1(i), 2*t(i) + 4 ); - tensor_type r2 = 2 + t + 2 + t; + tensor_t r2 = 2 + t + 2 + t; - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r2(i), 2*t(i) + 4 ); 
+ for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r2(i), 2*t(i) + 4 ); - tensor_type r3 = (t-2) + (t-2); + tensor_t r3 = (t-2) + (t-2); - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r3(i), 2*t(i) - 4 ); + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r3(i), 2*t(i) - 4 ); - tensor_type r4 = (t*2) * (3*t); + tensor_t r4 = (t*2) * (3*t); - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r4(i), 2*3*t(i)*t(i) ); + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r4(i), 2*3*t(i)*t(i) ); - tensor_type r5 = (t2*2) / (2*t2) * t2; + tensor_t r5 = (t2*2) / (2*t2) * t2; - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r5(i), (t2(i)*2) / (2*t2(i)) * t2(i) ); + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r5(i), (t2(i)*2) / (2*t2(i)) * t2(i) ); - tensor_type r6 = (t2/2+1) / (2/t2+1) / t2; + tensor_t r6 = (t2/2+1) / (2/t2+1) / t2; - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r6(i), (t2(i)/2+1) / (2/t2(i)+1) / t2(i) ); + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r6(i), (t2(i)/2+1) / (2/t2(i)+1) / t2(i) ); - }; + }; - for_each_tuple(extents,check); + for_each_in_tuple(extents,check); } @@ -164,79 +159,79 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_unary_arithmetic_operations, value BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_assign_arithmetic_operations, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; - auto check = [](auto const&, auto& e) - { - using extents_type = std::decay_t; - using tensor_type = ublas::fixed_rank_tensor; - auto t = tensor_type (e); - auto t2 = tensor_type (e); - auto r = tensor_type (e); - auto v = value_type {}; + auto check = [](auto const& /*unused*/, auto& e) + { + constexpr auto size = std::tuple_size_v>; + using tensor_t = ublas::tensor_static_rank; + auto t = tensor_t (e); + auto t2 = tensor_t (e); + auto r = tensor_t (e); + auto v = value_t {}; - std::iota(t.begin(), t.end(), v); - std::iota(t2.begin(), t2.end(), v+2); + std::iota(t.begin(), t.end(), v); + std::iota(t2.begin(), t2.end(), v+2); - r = t + 2; - r += t; - r += 2; + r = t + 2; + r += t; + r += 2; - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), 2*t(i) + 4 ); + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 2*t(i) + 4 ); - r = 2 + t; - r += t; - r += 2; + r = 2 + t; + r += t; + r += 2; - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), 2*t(i) + 4 ); + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 2*t(i) + 4 ); - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), 2*t(i) + 4 ); + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 2*t(i) + 4 ); - r = (t-2); - r += t; - r -= 2; + r = (t-2); + r += t; + r -= 2; - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), 2*t(i) - 4 ); + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 2*t(i) - 4 ); - r = (t*2); - r *= 3; - r *= t; + r = (t*2); + r *= 3; + r *= t; - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), 2*3*t(i)*t(i) ); + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 2*3*t(i)*t(i) ); - r = (t2*2); - r /= 2; - r /= t2; - r *= t2; + r = (t2*2); + r /= 2; + r /= t2; + r *= t2; - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( 
r(i), (t2(i)*2) / (2*t2(i)) * t2(i) ); + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), (t2(i)*2) / (2*t2(i)) * t2(i) ); - r = (t2/2+1); - r /= (2/t2+1); - r /= t2; + r = (t2/2+1); + r /= (2/t2+1); + r /= t2; - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), (t2(i)/2+1) / (2/t2(i)+1) / t2(i) ); + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), (t2(i)/2+1) / (2/t2(i)+1) / t2(i) ); - tensor_type q = -r; - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( q(i), -r(i) ); + tensor_t q = -r; + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( q(i), -r(i) ); - tensor_type p = +r; - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( p(i), r(i) ); - }; + tensor_t p = +r; + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( p(i), r(i) ); + }; - for_each_tuple(extents,check); + for_each_in_tuple(extents,check); } diff --git a/test/tensor/test_fixed_rank_operators_comparison.cpp b/test/tensor/test_fixed_rank_operators_comparison.cpp index a4c60da0c..59338c387 100644 --- a/test/tensor/test_fixed_rank_operators_comparison.cpp +++ b/test/tensor/test_fixed_rank_operators_comparison.cpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at @@ -17,187 +17,181 @@ #include #include "utility.hpp" -BOOST_AUTO_TEST_SUITE(test_fixed_rank_tensor_comparison) +BOOST_AUTO_TEST_SUITE(test_tensor_static_rank_comparison) using double_extended = boost::multiprecision::cpp_bin_float_double_extended; using test_types = zip::with_t; struct fixture { - template - using extents_type = boost::numeric::ublas::extents; - - std::tuple< - extents_type<2>, // 1 - extents_type<2>, // 2 - extents_type<3>, // 3 - extents_type<3>, // 4 - extents_type<4> // 5 + template + using extents_t = boost::numeric::ublas::extents; + + std::tuple< + extents_t<2>, // 1 + extents_t<2>, // 2 + extents_t<3>, // 3 + extents_t<3>, // 4 + extents_t<4> // 5 > extents = { - extents_type<2>{1,1}, - extents_type<2>{2,3}, - extents_type<3>{4,1,3}, - extents_type<3>{4,2,3}, - extents_type<4>{4,2,3,5} - }; + extents_t<2>{1,1}, + extents_t<2>{2,3}, + extents_t<3>{4,1,3}, + extents_t<3>{4,2,3}, + extents_t<4>{4,2,3,5} + }; }; BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - - auto check = [](auto const&, auto& e) - { - using extents_type = std::decay_t; - using tensor_type = ublas::fixed_rank_tensor; - auto t = tensor_type (e); - auto t2 = tensor_type (e); - auto v = value_type {}; - - std::iota(t.begin(), t.end(), v); - std::iota(t2.begin(), t2.end(), v+2); - - BOOST_CHECK( t == t ); - BOOST_CHECK( t != t2 ); - - if(t.empty()) - return; - - BOOST_CHECK(!(t < t)); - BOOST_CHECK(!(t > t)); - BOOST_CHECK( t < t2 ); - BOOST_CHECK( t2 > t ); - BOOST_CHECK( t <= t ); - BOOST_CHECK( t >= t ); - BOOST_CHECK( t <= t2 ); - BOOST_CHECK( t2 >= t ); - BOOST_CHECK( t2 >= t2 ); - BOOST_CHECK( t2 >= t ); - }; - - for_each_tuple(extents,check); + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + + auto check = 
[](auto const& /*unused*/, auto& e) + { + using extents_t = std::decay_t; + using tensor_t = ublas::tensor_static_rank, layout_t>; + auto t = tensor_t (e); + auto t2 = tensor_t (e); + auto v = value_t {}; + + std::iota(t.begin(), t.end(), v); + std::iota(t2.begin(), t2.end(), v+2); + + BOOST_CHECK( t == t ); + BOOST_CHECK( t != t2 ); + + if(t.empty()) + return; + + BOOST_CHECK(!(t < t)); + BOOST_CHECK(!(t > t)); + BOOST_CHECK( t < t2 ); + BOOST_CHECK( t2 > t ); + BOOST_CHECK( t <= t ); + BOOST_CHECK( t >= t ); + BOOST_CHECK( t <= t2 ); + BOOST_CHECK( t2 >= t ); + BOOST_CHECK( t2 >= t2 ); + BOOST_CHECK( t2 >= t ); + }; + + for_each_in_tuple(extents,check); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison_with_tensor_expressions, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; - auto check = [](auto const&, auto& e) - { - using extents_type = std::decay_t; - using tensor_type = ublas::fixed_rank_tensor; + for_each_in_tuple(extents,[](auto const& /*unused*/, auto& e) { + using extents_t = std::decay_t; + using tensor_t = ublas::tensor_static_rank, layout_t>; - auto t = tensor_type (e); - auto t2 = tensor_type (e); - auto v = value_type {}; + auto t = tensor_t (e); + auto t2 = tensor_t (e); + auto v = value_t {}; - std::iota(t.begin(), t.end(), v); - std::iota(t2.begin(), t2.end(), v+2); + std::iota(t.begin(), t.end(), v); + std::iota(t2.begin(), t2.end(), v+2); - BOOST_CHECK( t == t ); - BOOST_CHECK( t != t2 ); + BOOST_CHECK( t == t ); + BOOST_CHECK( t != t2 ); - if(t.empty()) - return; + if(t.empty()) + return; - BOOST_CHECK( !(t < t) ); - BOOST_CHECK( !(t > t) ); - BOOST_CHECK( t < (t2+t) ); - BOOST_CHECK( (t2+t) > t ); - BOOST_CHECK( t <= (t+t) ); - BOOST_CHECK( (t+t2) >= t ); - BOOST_CHECK( (t2+t2+2) >= t); - BOOST_CHECK( 2*t2 > t ); - BOOST_CHECK( t < 2*t2 ); - BOOST_CHECK( 2*t2 > t); - BOOST_CHECK( 2*t2 >= t2 ); - BOOST_CHECK( t2 <= 2*t2); - BOOST_CHECK( 3*t2 >= t ); + BOOST_CHECK( !(t < t) ); + BOOST_CHECK( !(t > t) ); + BOOST_CHECK( t < (t2+t) ); + BOOST_CHECK( (t2+t) > t ); + BOOST_CHECK( t <= (t+t) ); + BOOST_CHECK( (t+t2) >= t ); + BOOST_CHECK( (t2+t2+2) >= t); + BOOST_CHECK( 2*t2 > t ); + BOOST_CHECK( t < 2*t2 ); + BOOST_CHECK( 2*t2 > t); + BOOST_CHECK( 2*t2 >= t2 ); + BOOST_CHECK( t2 <= 2*t2); + BOOST_CHECK( 3*t2 >= t ); + }); - }; - - for_each_tuple(extents,check); } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison_with_scalar, value, test_types, fixture) -{ - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - +//BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison_with_scalar, value, test_types, fixture) +//{ +// namespace ublas = boost::numeric::ublas; +// using value_t = typename value::first_type; +// using layout_t = typename value::second_type; - auto check = [](auto const&, auto& e) - { - using extents_type = std::decay_t; - using tensor_type = ublas::fixed_rank_tensor; - BOOST_CHECK( tensor_type(e,value_type{2}) == tensor_type(e,value_type{2}) ); - BOOST_CHECK( tensor_type(e,value_type{2}) != tensor_type(e,value_type{1}) ); +// for_each_in_tuple(extents, [](auto const& /*unused*/, auto& e) { +// using extents_t = std::decay_t; +// using tensor_t = ublas::tensor_static_rank, layout_t>; - if(e.empty()) - return; +// 
BOOST_CHECK( tensor_t(e,value_t{2}) == tensor_t(e,value_t{2}) ); +// BOOST_CHECK( tensor_t(e,value_t{2}) != tensor_t(e,value_t{1}) ); - BOOST_CHECK( !(tensor_type(e,2) < 2) ); - BOOST_CHECK( !(tensor_type(e,2) > 2) ); - BOOST_CHECK( (tensor_type(e,2) >= 2) ); - BOOST_CHECK( (tensor_type(e,2) <= 2) ); - BOOST_CHECK( (tensor_type(e,2) == 2) ); - BOOST_CHECK( (tensor_type(e,2) != 3) ); +// if(ublas::empty(e)) +// return; - BOOST_CHECK( !(2 > tensor_type(e,2)) ); - BOOST_CHECK( !(2 < tensor_type(e,2)) ); - BOOST_CHECK( (2 <= tensor_type(e,2)) ); - BOOST_CHECK( (2 >= tensor_type(e,2)) ); - BOOST_CHECK( (2 == tensor_type(e,2)) ); - BOOST_CHECK( (3 != tensor_type(e,2)) ); +// BOOST_CHECK( !(tensor_t(e,2) < 2) ); +// BOOST_CHECK( !(tensor_t(e,2) > 2) ); +// BOOST_CHECK( (tensor_t(e,2) >= 2) ); +// BOOST_CHECK( (tensor_t(e,2) <= 2) ); +// BOOST_CHECK( (tensor_t(e,2) == 2) ); +// BOOST_CHECK( (tensor_t(e,2) != 3) ); - BOOST_CHECK( !( tensor_type(e,2)+3 < 5) ); - BOOST_CHECK( !( tensor_type(e,2)+3 > 5) ); - BOOST_CHECK( ( tensor_type(e,2)+3 >= 5) ); - BOOST_CHECK( ( tensor_type(e,2)+3 <= 5) ); - BOOST_CHECK( ( tensor_type(e,2)+3 == 5) ); - BOOST_CHECK( ( tensor_type(e,2)+3 != 6) ); +// BOOST_CHECK( !(2 > tensor_t(e,2)) ); +// BOOST_CHECK( !(2 < tensor_t(e,2)) ); +// BOOST_CHECK( (2 <= tensor_t(e,2)) ); +// BOOST_CHECK( (2 >= tensor_t(e,2)) ); +// BOOST_CHECK( (2 == tensor_t(e,2)) ); +// BOOST_CHECK( (3 != tensor_t(e,2)) ); +// BOOST_CHECK( !( tensor_t(e,2)+3 < 5) ); +// BOOST_CHECK( !( tensor_t(e,2)+3 > 5) ); +// BOOST_CHECK( ( tensor_t(e,2)+3 >= 5) ); +// BOOST_CHECK( ( tensor_t(e,2)+3 <= 5) ); +// BOOST_CHECK( ( tensor_t(e,2)+3 == 5) ); +// BOOST_CHECK( ( tensor_t(e,2)+3 != 6) ); - BOOST_CHECK( !( 5 > tensor_type(e,2)+3) ); - BOOST_CHECK( !( 5 < tensor_type(e,2)+3) ); - BOOST_CHECK( ( 5 >= tensor_type(e,2)+3) ); - BOOST_CHECK( ( 5 <= tensor_type(e,2)+3) ); - BOOST_CHECK( ( 5 == tensor_type(e,2)+3) ); - BOOST_CHECK( ( 6 != tensor_type(e,2)+3) ); +// BOOST_CHECK( !( 5 > tensor_t(e,2)+3) ); +// BOOST_CHECK( !( 5 < tensor_t(e,2)+3) ); +// BOOST_CHECK( ( 5 >= tensor_t(e,2)+3) ); +// BOOST_CHECK( ( 5 <= tensor_t(e,2)+3) ); +// BOOST_CHECK( ( 5 == tensor_t(e,2)+3) ); +// BOOST_CHECK( ( 6 != tensor_t(e,2)+3) ); - BOOST_CHECK( !( tensor_type(e,2)+tensor_type(e,3) < 5) ); - BOOST_CHECK( !( tensor_type(e,2)+tensor_type(e,3) > 5) ); - BOOST_CHECK( ( tensor_type(e,2)+tensor_type(e,3) >= 5) ); - BOOST_CHECK( ( tensor_type(e,2)+tensor_type(e,3) <= 5) ); - BOOST_CHECK( ( tensor_type(e,2)+tensor_type(e,3) == 5) ); - BOOST_CHECK( ( tensor_type(e,2)+tensor_type(e,3) != 6) ); +// BOOST_CHECK( !( tensor_t(e,2)+tensor_t(e,3) < 5) ); +// BOOST_CHECK( !( tensor_t(e,2)+tensor_t(e,3) > 5) ); +// BOOST_CHECK( ( tensor_t(e,2)+tensor_t(e,3) >= 5) ); +// BOOST_CHECK( ( tensor_t(e,2)+tensor_t(e,3) <= 5) ); +// BOOST_CHECK( ( tensor_t(e,2)+tensor_t(e,3) == 5) ); +// BOOST_CHECK( ( tensor_t(e,2)+tensor_t(e,3) != 6) ); - BOOST_CHECK( !( 5 > tensor_type(e,2)+tensor_type(e,3)) ); - BOOST_CHECK( !( 5 < tensor_type(e,2)+tensor_type(e,3)) ); - BOOST_CHECK( ( 5 >= tensor_type(e,2)+tensor_type(e,3)) ); - BOOST_CHECK( ( 5 <= tensor_type(e,2)+tensor_type(e,3)) ); - BOOST_CHECK( ( 5 == tensor_type(e,2)+tensor_type(e,3)) ); - BOOST_CHECK( ( 6 != tensor_type(e,2)+tensor_type(e,3)) ); - }; +// BOOST_CHECK( !( 5 > tensor_t(e,2)+tensor_t(e,3)) ); +// BOOST_CHECK( !( 5 < tensor_t(e,2)+tensor_t(e,3)) ); +// BOOST_CHECK( ( 5 >= tensor_t(e,2)+tensor_t(e,3)) ); +// BOOST_CHECK( ( 5 <= tensor_t(e,2)+tensor_t(e,3)) ); +// BOOST_CHECK( ( 5 == 
tensor_t(e,2)+tensor_t(e,3)) ); +// BOOST_CHECK( ( 6 != tensor_t(e,2)+tensor_t(e,3)) ); -for_each_tuple(extents,check); +// }); -} +//} BOOST_AUTO_TEST_SUITE_END() diff --git a/test/tensor/test_fixed_rank_strides.cpp b/test/tensor/test_fixed_rank_strides.cpp index c90cba953..c7efd6413 100644 --- a/test/tensor/test_fixed_rank_strides.cpp +++ b/test/tensor/test_fixed_rank_strides.cpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at @@ -10,152 +10,175 @@ // Google and Fraunhofer IOSB, Ettlingen, Germany // - +#if 0 #include #include -#include - -//BOOST_AUTO_TEST_SUITE(test_strides, * boost::unit_test::depends_on("test_extents")); +#include BOOST_AUTO_TEST_SUITE(test_fixed_rank_strides) using test_types = std::tuple; +template +using strides =boost::numeric::ublas::strides,L>; + BOOST_AUTO_TEST_CASE_TEMPLATE( test_fixed_rank_strides_ctor, value, test_types) { - namespace ub = boost::numeric::ublas; - - ub::basic_fixed_rank_strides s0{ub::extents<0>{}}; - BOOST_CHECK ( s0.empty()); - BOOST_CHECK_EQUAL ( s0.size(), 0); - - ub::basic_fixed_rank_strides s1{ub::extents<2>{1,1}}; - BOOST_CHECK (!s1.empty()); - BOOST_CHECK_EQUAL ( s1.size(), 2); - - ub::basic_fixed_rank_strides s2{ub::extents<2>{1,2}}; - BOOST_CHECK (!s2.empty()); - BOOST_CHECK_EQUAL ( s2.size(), 2); - - ub::basic_fixed_rank_strides s3{ub::extents<2>{2,1}}; - BOOST_CHECK (!s3.empty()); - BOOST_CHECK_EQUAL ( s3.size(), 2); - - ub::basic_fixed_rank_strides s4{ub::extents<2>{2,3}}; - BOOST_CHECK (!s4.empty()); - BOOST_CHECK_EQUAL ( s4.size(), 2); - - ub::basic_fixed_rank_strides s5{ub::extents<3>{2,3,1}}; - BOOST_CHECK (!s5.empty()); - BOOST_CHECK_EQUAL ( s5.size(), 3); - - ub::basic_fixed_rank_strides s6{ub::extents<3>{1,2,3}}; - BOOST_CHECK (!s6.empty()); - BOOST_CHECK_EQUAL ( s6.size(), 3); - - ub::basic_fixed_rank_strides s7{ub::extents<3>{4,2,3}}; - BOOST_CHECK (!s7.empty()); - BOOST_CHECK_EQUAL ( s7.size(), 3); + namespace ublas = boost::numeric::ublas; +// using layout_type = value; +// constexpr auto layout = value{}; + + auto s0 = strides<0,value>{}; + auto s1 = strides<1,value>({1} ); + auto s3 = strides<1,value>({3} ); + auto s11 = strides<2,value>({1,1} ); + auto s12 = strides<2,value>({1,2} ); + auto s21 = strides<2,value>({2,1} ); + auto s23 = strides<2,value>({2,3} ); + auto s231 = strides<3,value>({2,3,1} ); + auto s123 = strides<3,value>({1,2,3} ); + auto s423 = strides<3,value>({4,2,3} ); + + BOOST_CHECK ( s0.empty()); + BOOST_CHECK (! s1.empty()); + BOOST_CHECK (! s3.empty()); + BOOST_CHECK (! s11.empty()); + BOOST_CHECK (! s12.empty()); + BOOST_CHECK (! s21.empty()); + BOOST_CHECK (! 
s23.empty()); + BOOST_CHECK (!s231.empty()); + BOOST_CHECK (!s123.empty()); + BOOST_CHECK (!s423.empty()); + + + BOOST_CHECK_EQUAL ( s0.size(), 0); + BOOST_CHECK_EQUAL ( s1.size(), 3); + BOOST_CHECK_EQUAL ( s3.size(), 1); + BOOST_CHECK_EQUAL ( s11.size(), 2); + BOOST_CHECK_EQUAL ( s12.size(), 2); + BOOST_CHECK_EQUAL ( s21.size(), 2); + BOOST_CHECK_EQUAL ( s23.size(), 2); + BOOST_CHECK_EQUAL ( s231.size(), 3); + BOOST_CHECK_EQUAL ( s123.size(), 3); + BOOST_CHECK_EQUAL ( s423.size(), 3); } BOOST_AUTO_TEST_CASE( test_fixed_rank_strides_ctor_access_first_order) { - namespace ub = boost::numeric::ublas; - - ub::basic_fixed_rank_strides s1{ub::extents<2>{1,1}}; - BOOST_REQUIRE_EQUAL( s1.size(),2); - BOOST_CHECK_EQUAL ( s1[0], 1); - BOOST_CHECK_EQUAL ( s1[1], 1); - - ub::basic_fixed_rank_strides s2{ub::extents<2>{1,2}}; - BOOST_REQUIRE_EQUAL ( s2.size(),2); - BOOST_CHECK_EQUAL ( s2[0], 1); - BOOST_CHECK_EQUAL ( s2[1], 1); - - ub::basic_fixed_rank_strides s3{ub::extents<2>{2,1}}; - BOOST_REQUIRE_EQUAL ( s3.size(),2); - BOOST_CHECK_EQUAL ( s3[0], 1); - BOOST_CHECK_EQUAL ( s3[1], 1); - - ub::basic_fixed_rank_strides s4{ub::extents<2>{2,3}}; - BOOST_REQUIRE_EQUAL ( s4.size(),2); - BOOST_CHECK_EQUAL ( s4[0], 1); - BOOST_CHECK_EQUAL ( s4[1], 2); - - ub::basic_fixed_rank_strides s5{ub::extents<3>{2,3,1}}; - BOOST_REQUIRE_EQUAL ( s5.size(),3); - BOOST_CHECK_EQUAL ( s5[0], 1); - BOOST_CHECK_EQUAL ( s5[1], 2); - BOOST_CHECK_EQUAL ( s5[2], 6); - - ub::basic_fixed_rank_strides s6{ub::extents<3>{1,2,3}}; - BOOST_REQUIRE_EQUAL ( s6.size(),3); - BOOST_CHECK_EQUAL ( s6[0], 1); - BOOST_CHECK_EQUAL ( s6[1], 1); - BOOST_CHECK_EQUAL ( s6[2], 2); - - ub::basic_fixed_rank_strides s7{ub::extents<3>{2,1,3}}; - BOOST_REQUIRE_EQUAL ( s7.size(),3); - BOOST_CHECK_EQUAL ( s7[0], 1); - BOOST_CHECK_EQUAL ( s7[1], 2); - BOOST_CHECK_EQUAL ( s7[2], 2); - - ub::basic_fixed_rank_strides s8{ub::extents<3>{4,2,3}}; - BOOST_REQUIRE_EQUAL ( s8.size(),3); - BOOST_CHECK_EQUAL ( s8[0], 1); - BOOST_CHECK_EQUAL ( s8[1], 4); - BOOST_CHECK_EQUAL ( s8[2], 8); + using value = boost::numeric::ublas::layout::first_order; +// constexpr auto layout = boost::numeric::ublas::layout::first_order{}; + + auto s1 = strides<1,value>({1} ); + auto s3 = strides<1,value>({3} ); + auto s11 = strides<2,value>({1,1} ); + auto s12 = strides<2,value>({1,2} ); + auto s21 = strides<2,value>({2,1} ); + auto s23 = strides<2,value>({2,3} ); + auto s231 = strides<3,value>({2,3,1} ); + auto s213 = strides<3,value>({2,3,1} ); + auto s123 = strides<3,value>({1,2,3} ); + auto s423 = strides<3,value>({4,2,3} ); + + + BOOST_REQUIRE_EQUAL ( s1.size(),1); + BOOST_REQUIRE_EQUAL ( s3.size(),1); + BOOST_REQUIRE_EQUAL ( s11.size(),2); + BOOST_REQUIRE_EQUAL ( s12.size(),2); + BOOST_REQUIRE_EQUAL ( s21.size(),2); + BOOST_REQUIRE_EQUAL ( s23.size(),2); + BOOST_REQUIRE_EQUAL ( s231.size(),3); + BOOST_REQUIRE_EQUAL ( s213.size(),3); + BOOST_REQUIRE_EQUAL ( s123.size(),3); + BOOST_REQUIRE_EQUAL ( s423.size(),3); + + BOOST_CHECK_EQUAL ( s11[0], 1); + BOOST_CHECK_EQUAL ( s11[1], 1); + + BOOST_CHECK_EQUAL ( s12[0], 1); + BOOST_CHECK_EQUAL ( s12[1], 1); + + BOOST_CHECK_EQUAL ( s21[0], 1); + BOOST_CHECK_EQUAL ( s21[1], 1); + + BOOST_CHECK_EQUAL ( s23[0], 1); + BOOST_CHECK_EQUAL ( s23[1], 2); + + BOOST_CHECK_EQUAL ( s231[0], 1); + BOOST_CHECK_EQUAL ( s231[1], 2); + BOOST_CHECK_EQUAL ( s231[2], 6); + + BOOST_CHECK_EQUAL ( s123[0], 1); + BOOST_CHECK_EQUAL ( s123[1], 1); + BOOST_CHECK_EQUAL ( s123[2], 2); + + BOOST_CHECK_EQUAL ( s213[0], 1); + BOOST_CHECK_EQUAL ( s213[1], 2); + BOOST_CHECK_EQUAL 
( s213[2], 2); + + BOOST_CHECK_EQUAL ( s423[0], 1); + BOOST_CHECK_EQUAL ( s423[1], 4); + BOOST_CHECK_EQUAL ( s423[2], 8); } BOOST_AUTO_TEST_CASE( test_fixed_rank_strides_ctor_access_last_order) { - namespace ub = boost::numeric::ublas; - - ub::basic_fixed_rank_strides s1{ub::extents<2>{1,1}}; - BOOST_REQUIRE_EQUAL( s1.size(),2); - BOOST_CHECK_EQUAL ( s1[0], 1); - BOOST_CHECK_EQUAL ( s1[1], 1); - - ub::basic_fixed_rank_strides s2{ub::extents<2>{1,2}}; - BOOST_REQUIRE_EQUAL ( s2.size(),2); - BOOST_CHECK_EQUAL ( s2[0], 1); - BOOST_CHECK_EQUAL ( s2[1], 1); - - ub::basic_fixed_rank_strides s3{ub::extents<2>{2,1}}; - BOOST_REQUIRE_EQUAL ( s3.size(),2); - BOOST_CHECK_EQUAL ( s3[0], 1); - BOOST_CHECK_EQUAL ( s3[1], 1); - - ub::basic_fixed_rank_strides s4{ub::extents<2>{2,3}}; - BOOST_REQUIRE_EQUAL ( s4.size(),2); - BOOST_CHECK_EQUAL ( s4[0], 3); - BOOST_CHECK_EQUAL ( s4[1], 1); - - ub::basic_fixed_rank_strides s5{ub::extents<3>{2,3,1}}; - BOOST_REQUIRE_EQUAL ( s5.size(),3); - BOOST_CHECK_EQUAL ( s5[0], 3); - BOOST_CHECK_EQUAL ( s5[1], 1); - BOOST_CHECK_EQUAL ( s5[2], 1); - - ub::basic_fixed_rank_strides s6{ub::extents<3>{1,2,3}}; - BOOST_REQUIRE_EQUAL ( s6.size(),3); - BOOST_CHECK_EQUAL ( s6[0], 6); - BOOST_CHECK_EQUAL ( s6[1], 3); - BOOST_CHECK_EQUAL ( s6[2], 1); - - ub::basic_fixed_rank_strides s7{ub::extents<3>{2,1,3}}; - BOOST_REQUIRE_EQUAL ( s7.size(),3); - BOOST_CHECK_EQUAL ( s7[0], 3); - BOOST_CHECK_EQUAL ( s7[1], 3); - BOOST_CHECK_EQUAL ( s7[2], 1); - - ub::basic_fixed_rank_strides s8{ub::extents<3>{4,2,3}}; - BOOST_REQUIRE_EQUAL ( s8.size(),3); - BOOST_CHECK_EQUAL ( s8[0], 6); - BOOST_CHECK_EQUAL ( s8[1], 3); - BOOST_CHECK_EQUAL ( s8[2], 1); + using value = boost::numeric::ublas::layout::first_order; + // constexpr auto layout = boost::numeric::ublas::layout::first_order{}; + + auto s1 = strides<1,value>({1} ); + auto s3 = strides<1,value>({3} ); + auto s11 = strides<2,value>({1,1} ); + auto s12 = strides<2,value>({1,2} ); + auto s21 = strides<2,value>({2,1} ); + auto s23 = strides<2,value>({2,3} ); + auto s231 = strides<3,value>({2,3,1} ); + auto s213 = strides<3,value>({2,3,1} ); + auto s123 = strides<3,value>({1,2,3} ); + auto s423 = strides<3,value>({4,2,3} ); + + BOOST_REQUIRE_EQUAL ( s1.size(),1); + BOOST_REQUIRE_EQUAL ( s3.size(),1); + BOOST_REQUIRE_EQUAL ( s11.size(),2); + BOOST_REQUIRE_EQUAL ( s12.size(),2); + BOOST_REQUIRE_EQUAL ( s21.size(),2); + BOOST_REQUIRE_EQUAL ( s23.size(),2); + BOOST_REQUIRE_EQUAL ( s231.size(),3); + BOOST_REQUIRE_EQUAL ( s213.size(),3); + BOOST_REQUIRE_EQUAL ( s123.size(),3); + BOOST_REQUIRE_EQUAL ( s423.size(),3); + + BOOST_CHECK_EQUAL ( s11[0], 1); + BOOST_CHECK_EQUAL ( s11[1], 1); + + BOOST_CHECK_EQUAL ( s12[0], 1); + BOOST_CHECK_EQUAL ( s12[1], 1); + + BOOST_CHECK_EQUAL ( s21[0], 1); + BOOST_CHECK_EQUAL ( s21[1], 1); + + BOOST_CHECK_EQUAL ( s23[0], 3); + BOOST_CHECK_EQUAL ( s23[1], 1); + + BOOST_CHECK_EQUAL ( s231[0], 3); + BOOST_CHECK_EQUAL ( s231[1], 1); + BOOST_CHECK_EQUAL ( s231[2], 1); + + BOOST_CHECK_EQUAL ( s123[0], 6); + BOOST_CHECK_EQUAL ( s123[1], 3); + BOOST_CHECK_EQUAL ( s123[2], 1); + + BOOST_CHECK_EQUAL ( s213[0], 3); + BOOST_CHECK_EQUAL ( s213[1], 3); + BOOST_CHECK_EQUAL ( s213[2], 1); + + BOOST_CHECK_EQUAL ( s423[0], 6); + BOOST_CHECK_EQUAL ( s423[1], 3); + BOOST_CHECK_EQUAL ( s423[2], 1); + } BOOST_AUTO_TEST_SUITE_END() + +#endif diff --git a/test/tensor/test_fixed_rank_tensor.cpp b/test/tensor/test_fixed_rank_tensor.cpp index 918020ddf..b7050c897 100644 --- a/test/tensor/test_fixed_rank_tensor.cpp +++ 
b/test/tensor/test_fixed_rank_tensor.cpp @@ -18,358 +18,356 @@ #include #include "utility.hpp" -BOOST_AUTO_TEST_SUITE ( test_fixed_rank_tensor ) +BOOST_AUTO_TEST_SUITE ( test_tensor_static_rank ) using test_types = zip>::with_t; BOOST_AUTO_TEST_CASE_TEMPLATE( test_tensor_ctor, value, test_types) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - - auto a1 = ublas::fixed_rank_tensor{}; - BOOST_CHECK_EQUAL( a1.size() , 0ul ); - BOOST_CHECK( a1.empty() ); - BOOST_CHECK_EQUAL( a1.data() , nullptr); - - auto a2 = ublas::fixed_rank_tensor{1,1}; - BOOST_CHECK_EQUAL( a2.size() , 1 ); - BOOST_CHECK( !a2.empty() ); - BOOST_CHECK_NE( a2.data() , nullptr); - - auto a3 = ublas::fixed_rank_tensor{2,1}; - BOOST_CHECK_EQUAL( a3.size() , 2 ); - BOOST_CHECK( !a3.empty() ); - BOOST_CHECK_NE( a3.data() , nullptr); - - auto a4 = ublas::fixed_rank_tensor{1,2}; - BOOST_CHECK_EQUAL( a4.size() , 2 ); - BOOST_CHECK( !a4.empty() ); - BOOST_CHECK_NE( a4.data() , nullptr); - - auto a5 = ublas::fixed_rank_tensor{2,1}; - BOOST_CHECK_EQUAL( a5.size() , 2 ); - BOOST_CHECK( !a5.empty() ); - BOOST_CHECK_NE( a5.data() , nullptr); - - auto a6 = ublas::fixed_rank_tensor{4,3,2}; - BOOST_CHECK_EQUAL( a6.size() , 4*3*2 ); - BOOST_CHECK( !a6.empty() ); - BOOST_CHECK_NE( a6.data() , nullptr); - - auto a7 = ublas::fixed_rank_tensor{4,1,2}; - BOOST_CHECK_EQUAL( a7.size() , 4*1*2 ); - BOOST_CHECK( !a7.empty() ); - BOOST_CHECK_NE( a7.data() , nullptr); + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + + auto a2 = ublas::tensor_static_rank{1,1}; + BOOST_CHECK_EQUAL( a2.size() , 1 ); + BOOST_CHECK( !a2.empty() ); + BOOST_CHECK_NE( a2.data() , nullptr); + + auto a3 = ublas::tensor_static_rank{2,1}; + BOOST_CHECK_EQUAL( a3.size() , 2 ); + BOOST_CHECK( !a3.empty() ); + BOOST_CHECK_NE( a3.data() , nullptr); + + auto a4 = ublas::tensor_static_rank{1,2}; + BOOST_CHECK_EQUAL( a4.size() , 2 ); + BOOST_CHECK( !a4.empty() ); + BOOST_CHECK_NE( a4.data() , nullptr); + + auto a5 = ublas::tensor_static_rank{2,1}; + BOOST_CHECK_EQUAL( a5.size() , 2 ); + BOOST_CHECK( !a5.empty() ); + BOOST_CHECK_NE( a5.data() , nullptr); + + auto a6 = ublas::tensor_static_rank{4,3,2}; + BOOST_CHECK_EQUAL( a6.size() , 4*3*2 ); + BOOST_CHECK( !a6.empty() ); + BOOST_CHECK_NE( a6.data() , nullptr); + + auto a7 = ublas::tensor_static_rank{4,1,2}; + BOOST_CHECK_EQUAL( a7.size() , 4*1*2 ); + BOOST_CHECK( !a7.empty() ); + BOOST_CHECK_NE( a7.data() , nullptr); } struct fixture { - template - using extents_type = boost::numeric::ublas::extents; - - std::tuple< - extents_type<2>, // 1 - extents_type<2>, // 2 - extents_type<3>, // 3 - extents_type<3>, // 4 - extents_type<4> // 5 + template + using extents_t = boost::numeric::ublas::extents; + + std::tuple< + extents_t<2>, // 1 + extents_t<2>, // 2 + extents_t<3>, // 3 + extents_t<3>, // 4 + extents_t<4> // 5 > extents = { - extents_type<2>{1,1}, - extents_type<2>{2,3}, - extents_type<3>{4,1,3}, - extents_type<3>{4,2,3}, - extents_type<4>{4,2,3,5} - }; + extents_t<2>{1,1}, + extents_t<2>{2,3}, + extents_t<3>{4,1,3}, + extents_t<3>{4,2,3}, + extents_t<4>{4,2,3,5} + }; }; BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - - for_each_tuple(extents, [](auto const&, auto& e){ - using 
extents_type = std::decay_t; - auto t = ublas::fixed_rank_tensor{e}; - - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); - if(e.empty()) { - BOOST_CHECK ( t.empty() ); - BOOST_CHECK_EQUAL ( t.data() , nullptr); - } - else{ - BOOST_CHECK ( !t.empty() ); - BOOST_CHECK_NE ( t.data() , nullptr); - } - }); + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + + for_each_in_tuple(extents, [](auto const& /*unused*/, auto& e){ + constexpr auto size = std::tuple_size_v>; + auto t = ublas::tensor_static_rank{e}; + + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); + if(ublas::empty(e)) { + BOOST_CHECK ( t.empty() ); + BOOST_CHECK_EQUAL ( t.data() , nullptr); + } + else{ + BOOST_CHECK ( !t.empty() ); + BOOST_CHECK_NE ( t.data() , nullptr); + } + }); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_ctor, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - - - for_each_tuple(extents, [](auto const&, auto& e){ - using extents_type = std::decay_t; - auto r = ublas::fixed_rank_tensor{e}; - - auto t = r; - BOOST_CHECK_EQUAL ( t.size() , r.size() ); - BOOST_CHECK_EQUAL ( t.rank() , r.rank() ); - BOOST_CHECK ( t.strides() == r.strides() ); - BOOST_CHECK ( t.extents() == r.extents() ); - - if(e.empty()) { - BOOST_CHECK ( t.empty() ); - BOOST_CHECK_EQUAL ( t.data() , nullptr); - } - else{ - BOOST_CHECK ( !t.empty() ); - BOOST_CHECK_NE ( t.data() , nullptr); - } - - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL( t[i], r[i] ); - - }); + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + + + for_each_in_tuple(extents, [](auto const& /*unused*/, auto& e){ + constexpr auto size = std::tuple_size_v>; + auto r = ublas::tensor_static_rank{e}; + + auto t = r; + BOOST_CHECK_EQUAL ( t.size() , r.size() ); + BOOST_CHECK_EQUAL ( t.rank() , r.rank() ); + // BOOST_CHECK ( t.strides() == r.strides() ); + BOOST_CHECK ( t.extents() == r.extents() ); + + if(ublas::empty(e)) { + BOOST_CHECK ( t.empty() ); + BOOST_CHECK_EQUAL ( t.data() , nullptr); + } + else{ + BOOST_CHECK ( !t.empty() ); + BOOST_CHECK_NE ( t.data() , nullptr); + } + + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL( t[i], r[i] ); + + }); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_ctor_layout, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using other_layout_type = std::conditional_t::value, ublas::layout::last_order, ublas::layout::first_order>; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + using other_layout_t = std::conditional_t::value, ublas::layout::last_order, ublas::layout::first_order>; - for_each_tuple(extents, [](auto const&, auto& e){ - using extents_type = std::decay_t; - using tensor_type = ublas::fixed_rank_tensor; - auto r = tensor_type{e}; - ublas::fixed_rank_tensor t = r; - tensor_type q = t; + for_each_in_tuple(extents, [](auto const& /*unused*/, auto& e){ + constexpr auto size = std::tuple_size_v>; + using tensor_t = ublas::tensor_static_rank; + using other_tensor_t = ublas::tensor_static_rank; + auto r = tensor_t(e); + 
other_tensor_t t = r; + tensor_t q = t; - BOOST_CHECK_EQUAL ( t.size() , r.size() ); - BOOST_CHECK_EQUAL ( t.rank() , r.rank() ); - BOOST_CHECK ( t.extents() == r.extents() ); + BOOST_CHECK_EQUAL ( t.size() , r.size() ); + BOOST_CHECK_EQUAL ( t.rank() , r.rank() ); + BOOST_CHECK ( t.extents() == r.extents() ); - BOOST_CHECK_EQUAL ( q.size() , r.size() ); - BOOST_CHECK_EQUAL ( q.rank() , r.rank() ); - BOOST_CHECK ( q.strides() == r.strides() ); - BOOST_CHECK ( q.extents() == r.extents() ); + BOOST_CHECK_EQUAL ( q.size() , r.size() ); + BOOST_CHECK_EQUAL ( q.rank() , r.rank() ); + // BOOST_CHECK ( q.strides() == r.strides() ); + BOOST_CHECK ( q.extents() == r.extents() ); - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL( q[i], r[i] ); + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL( q[i], r[i] ); - }); + }); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_move_ctor, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - - auto check = [](auto const&, auto& e) - { - using extents_type = std::decay_t; - using tensor_type = ublas::fixed_rank_tensor; - auto r = tensor_type{e}; - auto t = std::move(r); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); - - if(e.empty()) { - BOOST_CHECK ( t.empty() ); - BOOST_CHECK_EQUAL ( t.data() , nullptr); - } - else{ - BOOST_CHECK ( !t.empty() ); - BOOST_CHECK_NE ( t.data() , nullptr); - } - - }; - - for_each_tuple(extents,check); + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + + auto check = [](auto const& /*unused*/, auto& e) + { + constexpr auto size = std::tuple_size_v>; + using tensor_t = ublas::tensor_static_rank; + auto r = tensor_t{e}; + auto t = std::move(r); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); + + if(ublas::empty(e)) { + BOOST_CHECK ( t.empty() ); + BOOST_CHECK_EQUAL ( t.data() , nullptr); + } + else{ + BOOST_CHECK ( !t.empty() ); + BOOST_CHECK_NE ( t.data() , nullptr); + } + + }; + + for_each_in_tuple(extents,check); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents_init, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; - std::random_device device{}; - std::minstd_rand0 generator(device()); + std::random_device device{}; + std::minstd_rand0 generator(device()); - using distribution_type = std::conditional_t, std::uniform_int_distribution<>, std::uniform_real_distribution<> >; - auto distribution = distribution_type(1,6); + using distribution_type = std::conditional_t, std::uniform_int_distribution<>, std::uniform_real_distribution<> >; + auto distribution = distribution_type(1,6); - for_each_tuple(extents, [&](auto const&, auto const& e){ - using extents_type = std::decay_t; - using tensor_type = ublas::fixed_rank_tensor; + for_each_in_tuple(extents, [&](auto const& /*unused*/, auto const& e){ + constexpr auto size = std::tuple_size_v>; + using tensor_t = ublas::tensor_static_rank; - auto r = value_type( static_cast< inner_type_t >(distribution(generator)) ); - auto t = tensor_type{e,r}; - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL( t[i], r 
); + auto r = value_t( static_cast< inner_type_t >(distribution(generator)) ); + auto t = tensor_t(e); + t = r; + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL( t[i], r ); - }); + }); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents_array, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - - for_each_tuple(extents, [](auto const&, auto& e){ - using extents_type = std::decay_t; - using tensor_type = ublas::fixed_rank_tensor; - using array_type = typename tensor_type::array_type; - - auto a = array_type(product(e)); - auto v = value_type {}; - - for(auto& aa : a){ - aa = v; - v += value_type{1}; - } - auto t = tensor_type{e, a}; - v = value_type{}; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; - for(auto i = 0ul; i < t.size(); ++i, v+=value_type{1}) - BOOST_CHECK_EQUAL( t[i], v); + for_each_in_tuple(extents, [](auto const& /*unused*/, auto& e){ + constexpr auto size = std::tuple_size_v>; + using tensor_t = ublas::tensor_static_rank; + using container_t = typename tensor_t::container_type; - }); + auto a = container_t(product(e)); + auto v = value_t {}; + + for(auto& aa : a){ + aa = v; + v += value_t{1}; + } + auto t = tensor_t(e, a); + v = value_t{}; + + for(auto i = 0ul; i < t.size(); ++i, v+=value_t{1}) + BOOST_CHECK_EQUAL( t[i], v); + + }); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_single_index_access, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - - for_each_tuple(extents, [](auto const&, auto& e){ - using extents_type = std::decay_t; - using tensor_type = ublas::fixed_rank_tensor; - - auto t = tensor_type{e}; - auto v = value_type {}; - for(auto i = 0ul; i < t.size(); ++i, v+=value_type{1}){ - t[i] = v; - BOOST_CHECK_EQUAL( t[i], v ); - - t(i) = v; - BOOST_CHECK_EQUAL( t(i), v ); - } + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; - }); + for_each_in_tuple(extents, [](auto const& /*unused*/, auto& e){ + constexpr auto size = std::tuple_size_v>; + using tensor_t = ublas::tensor_static_rank; + + auto t = tensor_t{e}; + auto v = value_t {}; + for(auto i = 0ul; i < t.size(); ++i, v+=value_t{1}){ + t[i] = v; + BOOST_CHECK_EQUAL( t[i], v ); + + t(i) = v; + BOOST_CHECK_EQUAL( t(i), v ); + } + + }); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_multi_index_access_at, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - - auto check1 = [](const auto& t) - { - auto v = value_type{}; - for(auto k = 0ul; k < t.size(); ++k){ - BOOST_CHECK_EQUAL(t[k], v); - v+=value_type{1}; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + + auto check1 = [](const auto& t) + { + auto v = value_t{}; + for(auto k = 0ul; k < t.size(); ++k){ + BOOST_CHECK_EQUAL(t[k], v); + v+=value_t{1}; + } + }; + + auto check2 = [](const auto& t) + { + std::array k = {0,0}; + auto r = std::is_same::value ? 1 : 0; + auto q = std::is_same::value ? 
1 : 0; + auto v = value_t{}; + for(k[r] = 0ul; k[r] < t.size(r); ++k[r]){ + for(k[q] = 0ul; k[q] < t.size(q); ++k[q]){ + BOOST_CHECK_EQUAL(t.at(k[0],k[1]), v); + v+=value_t{1}; + } + } + }; + + auto check3 = [](const auto& t) + { + std::array k = {0,0,0}; + using op_type = std::conditional_t, std::minus<>, std::plus<>>; + auto r = std::is_same_v ? 2 : 0; + auto o = op_type{}; + auto v = value_t{}; + for(k[r] = 0ul; k[r] < t.size(r); ++k[r]){ + for(k[o(r,1)] = 0ul; k[o(r,1)] < t.size(o(r,1)); ++k[o(r,1)]){ + for(k[o(r,2)] = 0ul; k[o(r,2)] < t.size(o(r,2)); ++k[o(r,2)]){ + BOOST_CHECK_EQUAL(t.at(k[0],k[1],k[2]), v); + v+=value_t{1}; } - }; - - auto check2 = [](const auto& t) - { - std::array k; - auto r = std::is_same::value ? 1 : 0; - auto q = std::is_same::value ? 1 : 0; - auto v = value_type{}; - for(k[r] = 0ul; k[r] < t.size(r); ++k[r]){ - for(k[q] = 0ul; k[q] < t.size(q); ++k[q]){ - BOOST_CHECK_EQUAL(t.at(k[0],k[1]), v); - v+=value_type{1}; - } + } + } + }; + + auto check4 = [](const auto& t) + { + static constexpr auto order = 4; + std::array k = {0,0,0,0}; + using op_type = std::conditional_t, std::minus<>, std::plus<>>; + auto r = std::is_same_v ? (order-1) : 0; + auto o = op_type{}; + auto v = value_t{}; + for(k[o(r,0)] = 0ul; k[o(r,0)] < t.size(o(r,0)); ++k[o(r,0)]){ + for(k[o(r,1)] = 0ul; k[o(r,1)] < t.size(o(r,1)); ++k[o(r,1)]){ + for(k[o(r,2)] = 0ul; k[o(r,2)] < t.size(o(r,2)); ++k[o(r,2)]){ + for(k[o(r,3)] = 0ul; k[o(r,3)] < t.size(o(r,3)); ++k[o(r,3)]){ + BOOST_CHECK_EQUAL(t.at(k[0],k[1],k[2],k[3]), v); + v+=value_t{1}; + } } - }; - - auto check3 = [](const auto& t) - { - std::array k; - using op_type = std::conditional_t, std::minus<>, std::plus<>>; - auto r = std::is_same_v ? 2 : 0; - auto o = op_type{}; - auto v = value_type{}; - for(k[r] = 0ul; k[r] < t.size(r); ++k[r]){ - for(k[o(r,1)] = 0ul; k[o(r,1)] < t.size(o(r,1)); ++k[o(r,1)]){ - for(k[o(r,2)] = 0ul; k[o(r,2)] < t.size(o(r,2)); ++k[o(r,2)]){ - BOOST_CHECK_EQUAL(t.at(k[0],k[1],k[2]), v); - v+=value_type{1}; - } - } - } - }; - - auto check4 = [](const auto& t) - { - std::array k; - using op_type = std::conditional_t, std::minus<>, std::plus<>>; - auto r = std::is_same_v ? 
3 : 0; - auto o = op_type{}; - auto v = value_type{}; - for(k[r] = 0ul; k[r] < t.size(r); ++k[r]){ - for(k[o(r,1)] = 0ul; k[o(r,1)] < t.size(o(r,1)); ++k[o(r,1)]){ - for(k[o(r,2)] = 0ul; k[o(r,2)] < t.size(o(r,2)); ++k[o(r,2)]){ - for(k[o(r,3)] = 0ul; k[o(r,3)] < t.size(o(r,3)); ++k[o(r,3)]){ - BOOST_CHECK_EQUAL(t.at(k[0],k[1],k[2],k[3]), v); - v+=value_type{1}; - } - } - } - } - }; - - auto check = [check1,check2,check3,check4](auto const&, auto const& e) { - using extents_type = std::decay_t; - using tensor_type = ublas::fixed_rank_tensor; - auto t = tensor_type{e}; - auto v = value_type {}; - for(auto i = 0ul; i < t.size(); ++i){ - t[i] = v; - v+=value_type{1}; - } - - if constexpr(extents_type::_size == 1) check1(t); - else if constexpr(extents_type::_size == 2) check2(t); - else if constexpr(extents_type::_size == 3) check3(t); - else if constexpr(extents_type::_size == 4) check4(t); - - }; - - for_each_tuple(extents,check); + } + } + }; + + auto check = [check1,check2,check3,check4](auto const& /*unused*/, auto const& e) { + constexpr auto size = std::tuple_size_v>; + using tensor_t = ublas::tensor_static_rank; + auto t = tensor_t(e); + auto v = value_t {}; + for(auto i = 0ul; i < t.size(); ++i){ + t[i] = v; + v+=value_t{1}; + } + + if constexpr(size == 1) check1(t); + else if constexpr(size == 2) check2(t); + else if constexpr(size == 3) check3(t); + else if constexpr(size == 4) check4(t); + + }; + + for_each_in_tuple(extents,check); } @@ -377,41 +375,42 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_multi_index_access_at, BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_reshape, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - - for_each_tuple(extents,[&](auto const&, auto& efrom){ - - using extents_type = std::decay_t; - using tensor_type = ublas::fixed_rank_tensor; - - for_each_tuple(extents,[&](auto const&, auto& eto){ - using extents_type = std::decay_t; - using to_extents_type = std::decay_t; - if constexpr( extents_type::_size == to_extents_type::_size ){ - - auto v = value_type {}; - v+=value_type{1}; - auto t = tensor_type{efrom, v}; - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL( t[i], v ); - - t.reshape(eto); - for(auto i = 0ul; i < std::min(product(efrom),product(eto)); ++i) - BOOST_CHECK_EQUAL( t[i], v ); - - BOOST_CHECK_EQUAL ( t.size() , product(eto) ); - BOOST_CHECK_EQUAL ( t.rank() , eto.size() ); - BOOST_CHECK ( t.extents() == eto ); - - if(efrom != eto){ - for(auto i = product(efrom); i < t.size(); ++i) - BOOST_CHECK_EQUAL( t[i], value_type{} ); - } - } - }); + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + + for_each_in_tuple(extents,[&](auto const& /*unused*/, auto const& efrom){ + + using efrom_t = std::decay_t; + using tensor_t = ublas::tensor_static_rank, layout_t>; + + for_each_in_tuple(extents,[&](auto const& /*unused*/, auto& eto){ + using eto_t = std::decay_t; + + if constexpr( std::tuple_size_v == std::tuple_size_v ){ + + auto v = value_t {}; + v+=value_t{1}; + auto t = tensor_t(efrom); + t = v; + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL( t[i], v ); + + auto t2 = reshape(t,eto); + for(auto i = 0ul; i < std::min(ublas::product(efrom),ublas::product(eto)); ++i) + BOOST_CHECK_EQUAL( t2[i], v ); + + BOOST_CHECK_EQUAL ( t2.size() , ublas::product(eto) ); + BOOST_CHECK_EQUAL ( t2.rank() , ublas::size (eto) ); + 
BOOST_CHECK ( t2.extents() == eto ); + + if(efrom != eto){ + for(auto i = product(efrom); i < t.size(); ++i) + BOOST_CHECK_EQUAL( t2[i], value_t{} ); + } + } }); + }); } @@ -419,88 +418,92 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_reshape, value, test_types, fixtu BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_swap, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - - for_each_tuple(extents,[&](auto const&, auto& e_t){ - - using extents_type = std::decay_t; - using tensor_type = ublas::fixed_rank_tensor; - - for_each_tuple(extents,[&](auto const&, auto& e_r){ - using extents_type = std::decay_t; - using r_extents_type = std::decay_t; - if constexpr( extents_type::_size == r_extents_type::_size ){ - - auto v = value_type {} + value_type{1}; - auto w = value_type {} + value_type{2}; - auto t = tensor_type{e_t, v}; - auto r = tensor_type{e_r, w}; - - std::swap( r, t ); - - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL( t[i], w ); - - BOOST_CHECK_EQUAL ( t.size() , product(e_r) ); - BOOST_CHECK_EQUAL ( t.rank() , e_r.size() ); - BOOST_CHECK ( t.extents() == e_r ); - - for(auto i = 0ul; i < r.size(); ++i) - BOOST_CHECK_EQUAL( r[i], v ); - - BOOST_CHECK_EQUAL ( r.size() , product(e_t) ); - BOOST_CHECK_EQUAL ( r.rank() , e_t.size() ); - BOOST_CHECK ( r.extents() == e_t ); - - } - }); + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + + for_each_in_tuple(extents,[&](auto const& /*unused*/, auto const& e_t){ + using e_tt = std::decay_t< decltype(e_t) >; + using tensor_t = ublas::tensor_static_rank, layout_t>; + + for_each_in_tuple(extents,[&](auto const& /*unused*/, auto& e_r){ + + using e_rt = std::decay_t< decltype(e_r) >; + + if constexpr( std::tuple_size_v == std::tuple_size_v ){ + + auto v = value_t {} + value_t{1}; + auto w = value_t {} + value_t{2}; + auto t = tensor_t(e_t); + auto r = tensor_t(e_r); + + t = v; + r = w; + + std::swap( r, t ); + + BOOST_CHECK ( std::all_of(t.begin(),t.end(),[w](auto tt){return tt == w; } ) ) ; + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e_r) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e_r) ); + BOOST_CHECK ( t.extents() == e_r ); + BOOST_CHECK ( t.strides() == ublas::to_strides(e_r,layout_t{}) ); + + BOOST_CHECK ( std::all_of(r.begin(),r.end(),[v](auto tt){return tt == v; } ) ) ; + BOOST_CHECK_EQUAL ( r.size() , ublas::product(e_t) ); + BOOST_CHECK_EQUAL ( r.rank() , ublas::size (e_t) ); + BOOST_CHECK ( r.extents() == e_t ); + BOOST_CHECK ( r.strides() == ublas::to_strides(e_t,layout_t{}) ); + + + } }); + }); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_standard_iterator, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; - for_each_tuple(extents,[](auto const&, auto& e){ - using extents_type = std::decay_t; - using tensor_type = ublas::fixed_rank_tensor; + for_each_in_tuple(extents,[](auto const& /*unused*/, auto const& e){ + using et = std::decay_t< decltype(e) >; + using tensor_t = ublas::tensor_static_rank, layout_t>; - auto v = value_type {} + value_type{1}; - auto t = tensor_type{e, v}; + auto v = value_t {} + value_t{1}; + auto t = tensor_t(e); + t = v; - BOOST_CHECK_EQUAL( 
std::distance(t.begin(), t.end ()), t.size() ); - BOOST_CHECK_EQUAL( std::distance(t.rbegin(), t.rend()), t.size() ); + BOOST_CHECK_EQUAL( std::distance(t.begin(), t.end ()), t.size() ); + BOOST_CHECK_EQUAL( std::distance(t.rbegin(), t.rend()), t.size() ); - BOOST_CHECK_EQUAL( std::distance(t.cbegin(), t.cend ()), t.size() ); - BOOST_CHECK_EQUAL( std::distance(t.crbegin(), t.crend()), t.size() ); + BOOST_CHECK_EQUAL( std::distance(t.cbegin(), t.cend ()), t.size() ); + BOOST_CHECK_EQUAL( std::distance(t.crbegin(), t.crend()), t.size() ); - if(t.size() > 0) { - BOOST_CHECK( t.data() == std::addressof( *t.begin () ) ) ; - BOOST_CHECK( t.data() == std::addressof( *t.cbegin() ) ) ; - } - }); + if(!t.empty()) { + BOOST_CHECK( t.data() == std::addressof( *t.begin () ) ) ; + BOOST_CHECK( t.data() == std::addressof( *t.cbegin() ) ) ; + } + }); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_throw, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::fixed_rank_tensor; - - std::vector vec(30); - BOOST_CHECK_THROW(tensor_type({5,5},vec), std::runtime_error); - - auto t = tensor_type{{5,5}}; - auto i = ublas::index::index_type<4>{}; - BOOST_CHECK_THROW((void)t.operator()(i,i,i), std::runtime_error); + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + using tensor_t = ublas::tensor_static_rank; + + auto vec = std::vector(1); + BOOST_CHECK_THROW(tensor_t({5,5},vec), std::length_error); + + // Does not throw but results in a static assertion +// auto t = tensor_t{{5,5}}; +// auto i = ublas::index::index_type<4>{}; +// BOOST_CHECK_THROW((void)t.operator()(i,i,i), std::runtime_error); } diff --git a/test/tensor/test_fixed_rank_tensor_matrix_vector.cpp b/test/tensor/test_fixed_rank_tensor_matrix_vector.cpp index e6cba14e9..5ed7a4f56 100644 --- a/test/tensor/test_fixed_rank_tensor_matrix_vector.cpp +++ b/test/tensor/test_fixed_rank_tensor_matrix_vector.cpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at @@ -19,35 +19,35 @@ #include "utility.hpp" -BOOST_AUTO_TEST_SUITE ( test_fixed_rank_tensor_matrix_interoperability ) ; +BOOST_AUTO_TEST_SUITE ( test_tensor_static_rank_matrix_interoperability ) using test_types = zip::with_t; BOOST_AUTO_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_ctor, value, test_types) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using matrix_type = typename tensor_type::matrix_type; + using layout = typename value::second_type; + using tensor = ublas::tensor_static_rank; + using matrix = typename tensor::matrix_type; - ublas::fixed_rank_tensor a2 = matrix_type(1,1); + auto a2 = tensor( matrix(1,1) ); BOOST_CHECK_EQUAL( a2.size() , 1 ); BOOST_CHECK( !a2.empty() ); BOOST_CHECK_NE( a2.data() , nullptr); - ublas::fixed_rank_tensor a3 = matrix_type(2,1); + auto a3 = tensor( matrix(2,1) ); BOOST_CHECK_EQUAL( a3.size() , 2 ); BOOST_CHECK( !a3.empty() ); BOOST_CHECK_NE( a3.data() , nullptr); - ublas::fixed_rank_tensor a4 = matrix_type(1,2); + auto a4 = tensor( matrix(1,2) ); BOOST_CHECK_EQUAL( a4.size() , 2 ); BOOST_CHECK( !a4.empty() ); BOOST_CHECK_NE( a4.data() , nullptr); - ublas::fixed_rank_tensor a5 = matrix_type(2,3); + auto a5 = tensor( matrix(2,3) ); BOOST_CHECK_EQUAL( a5.size() , 6 ); BOOST_CHECK( !a5.empty() ); BOOST_CHECK_NE( a5.data() , nullptr); @@ -56,28 +56,28 @@ BOOST_AUTO_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_ctor, value, test_types) BOOST_AUTO_TEST_CASE_TEMPLATE( test_tensor_vector_copy_ctor, value, test_types) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_static_rank; using vector_type = typename tensor_type::vector_type; - ublas::fixed_rank_tensor a2 = vector_type(1); + auto a2 = tensor_type( vector_type(1) ); BOOST_CHECK_EQUAL( a2.size() , 1 ); BOOST_CHECK( !a2.empty() ); BOOST_CHECK_NE( a2.data() , nullptr); - ublas::fixed_rank_tensor a3 = vector_type(2); + auto a3 = tensor_type( vector_type(2) ); BOOST_CHECK_EQUAL( a3.size() , 2 ); BOOST_CHECK( !a3.empty() ); BOOST_CHECK_NE( a3.data() , nullptr); - ublas::fixed_rank_tensor a4 = vector_type(2); + auto a4 = tensor_type( vector_type(2) ); BOOST_CHECK_EQUAL( a4.size() , 2 ); BOOST_CHECK( !a4.empty() ); BOOST_CHECK_NE( a4.data() , nullptr); - ublas::fixed_rank_tensor a5 = vector_type(3); + auto a5 = tensor_type( vector_type(3) ); BOOST_CHECK_EQUAL( a5.size() , 3 ); BOOST_CHECK( !a5.empty() ); BOOST_CHECK_NE( a5.data() , nullptr); @@ -93,12 +93,14 @@ struct fixture extents_type<2>, // 0 extents_type<2>, // 1 extents_type<2>, // 2 - extents_type<2> // 3 - > extents = { - {1,2}, - {2,1}, - {9,7}, - {12,12}, + extents_type<2>, // 3 + extents_type<2> // 4 + > extents = { + {1,1}, + {1,2}, + {2,1}, + {6,6}, + {9,7}, }; }; @@ -107,159 +109,158 @@ struct fixture BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_ctor_extents, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using matrix_type = typename tensor_type::matrix_type; - - auto check = [](auto const&, auto& e) { - using extents_type = std::decay_t; - 
using etensor_type = ublas::fixed_rank_tensor; - - assert(e.size()==2); - etensor_type t = matrix_type{e[0],e[1]}; - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); - BOOST_CHECK ( !t.empty() ); - BOOST_CHECK_NE ( t.data() , nullptr); - }; - - for_each_tuple(extents,check); + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + + auto check = [](auto const& /*unused*/, auto& e) { + constexpr auto size = std::tuple_size_v>; + using tensor = ublas::tensor_static_rank; + using matrix = typename tensor::matrix_type; + + assert(ublas::size(e)==2); + tensor t = matrix{e[0],e[1]}; + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); + BOOST_CHECK ( !t.empty() ); + BOOST_CHECK_NE ( t.data() , nullptr); + }; + + for_each_in_tuple(extents,check); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_copy_ctor_extents, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using vector_type = typename tensor_type::vector_type; + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; - auto check = [](auto const&, auto& e) { - using extents_type = std::decay_t; - using etensor_type = ublas::fixed_rank_tensor; - assert(e.size()==2); - if(e.empty()) - return; + auto check = [](auto const& /*unused*/, auto& e) { + constexpr auto size = std::tuple_size_v>; + using tensor = ublas::tensor_static_rank; + using vector = typename tensor::vector_type; - etensor_type t = vector_type(product(e)); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); - BOOST_CHECK ( !t.empty() ); - BOOST_CHECK_NE ( t.data() , nullptr); - }; + assert(ublas::size(e)==2); + if(ublas::empty(e)) + return; + + tensor t = vector (product(e)); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); + BOOST_CHECK ( !t.empty() ); + BOOST_CHECK_NE ( t.data() , nullptr); + }; - for_each_tuple(extents,check); + for_each_in_tuple(extents,check); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_assignment, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using matrix_type = typename tensor_type::matrix_type; - - auto check = [](auto const&, auto& e) { - using extents_type = std::decay_t; - using etensor_type = ublas::fixed_rank_tensor; - - assert(e.size() == 2); - auto t = etensor_type{}; - auto r = matrix_type(e[0],e[1]); - std::iota(r.data().begin(),r.data().end(), 1); - t = r; - - BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) ); - BOOST_CHECK_EQUAL ( t.extents().at(1) , e.at(1) ); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); - BOOST_CHECK ( !t.empty() ); - BOOST_CHECK_NE ( t.data() , nullptr); - - for(auto j = 0ul; j < t.size(1); ++j){ - for(auto i = 0ul; i < t.size(0); ++i){ - BOOST_CHECK_EQUAL( t.at(i,j), r(i,j) ); - } - } - }; - - for_each_tuple(extents,check); + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + + + 
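// --- editor's annotation: illustrative sketch, not part of the patch ----------
// The refactored checks derive the tensor rank at compile time from each extents
// entry of the fixture tuple and only then instantiate the static-rank tensor
// type inside the generic lambda. A minimal stand-alone sketch of that pattern,
// with std::array standing in for the library's static-rank extents type and
// assuming the template parameter order tensor_static_rank<value, rank, layout>
// (suggested by the surrounding tests, not verified here).
// needs <boost/numeric/ublas/tensor.hpp>, <array>, <tuple>, <type_traits>
void check_pattern(std::array<std::size_t, 2> const& e)
{
  namespace ublas = boost::numeric::ublas;
  constexpr auto rank = std::tuple_size_v<std::decay_t<decltype(e)>>;   // == 2
  using tensor = ublas::tensor_static_rank<float, rank, ublas::layout::first_order>;
  auto t = tensor{e[1], e[0]};   // extents are runtime values, the rank is static
  (void)t;
}
// -------------------------------------------------------------------------------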
for_each_in_tuple(extents, [](auto const& /*unused*/, auto& e) { + using etype = std::decay_t; + constexpr auto size = std::tuple_size_v; + using tensor = ublas::tensor_static_rank; + using matrix = typename tensor::matrix_type; + + assert(ublas::size(e) == 2); + auto t = tensor{e[1],e[0]}; + auto r = matrix(e[0],e[1]); + std::iota(r.data().begin(),r.data().end(), 1); + t = r; + + BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) ); + BOOST_CHECK_EQUAL ( t.extents().at(1) , e.at(1) ); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); + BOOST_CHECK ( !t.empty() ); + BOOST_CHECK_NE ( t.data() , nullptr); + + for(auto j = 0ul; j < t.size(1); ++j){ + for(auto i = 0ul; i < t.size(0); ++i){ + BOOST_CHECK_EQUAL( t.at(i,j), r(i,j) ); + } + } + }); + + //for_each_in_tuple(extents,check); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_copy_assignment, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using vector_type = typename tensor_type::vector_type; + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; - auto check = [](auto const&, auto& e) { - using extents_type = std::decay_t; - using etensor_type = ublas::fixed_rank_tensor; - - assert(e.size() == 2); - auto t = etensor_type{}; - auto r = vector_type(e[0]*e[1]); - std::iota(r.data().begin(),r.data().end(), 1); - t = r; - - BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0)*e.at(1) ); - BOOST_CHECK_EQUAL ( t.extents().at(1) , 1); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); - BOOST_CHECK ( !t.empty() ); - BOOST_CHECK_NE ( t.data() , nullptr); - - for(auto i = 0ul; i < t.size(); ++i){ - BOOST_CHECK_EQUAL( t[i], r(i) ); - } - }; + auto check = [](auto const& /*unused*/, auto& e) { + constexpr auto size = std::tuple_size_v>; + using tensor_type = ublas::tensor_static_rank; + using vector_type = typename tensor_type::vector_type; - for_each_tuple(extents,check); + assert(ublas::size(e) == 2); + auto t = tensor_type{e[1],e[0]}; + auto r = vector_type(e[0]*e[1]); + std::iota(r.data().begin(),r.data().end(), 1); + t = r; + + BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0)*e.at(1) ); + BOOST_CHECK_EQUAL ( t.extents().at(1) , 1); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); + BOOST_CHECK ( !t.empty() ); + BOOST_CHECK_NE ( t.data() , nullptr); + + for(auto i = 0ul; i < t.size(); ++i){ + BOOST_CHECK_EQUAL( t[i], r(i) ); + } + }; + + for_each_in_tuple(extents,check); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_move_assignment, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using matrix_type = typename tensor_type::matrix_type; + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; - auto check = [](auto const&, auto& e) { - using extents_type = std::decay_t; - using etensor_type = ublas::fixed_rank_tensor; - - assert(e.size() == 2); - auto t = etensor_type{}; - auto r = matrix_type(e[0],e[1]); - std::iota(r.data().begin(),r.data().end(), 1); - auto q = r; - t = std::move(r); - - 
BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) ); - BOOST_CHECK_EQUAL ( t.extents().at(1) , e.at(1) ); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); - BOOST_CHECK ( !t.empty() ); - BOOST_CHECK_NE ( t.data() , nullptr); - - for(auto j = 0ul; j < t.size(1); ++j){ - for(auto i = 0ul; i < t.size(0); ++i){ - BOOST_CHECK_EQUAL( t.at(i,j), q(i,j) ); - } - } - }; - for_each_tuple(extents,check); + auto check = [](auto const& /*unused*/, auto& e) { + constexpr auto size = std::tuple_size_v>; + using tensor_type = ublas::tensor_static_rank; + using matrix_type = typename tensor_type::matrix_type; + + assert(ublas::size(e) == 2); + auto t = tensor_type{e[1],e[0]}; + auto r = matrix_type(e[0],e[1]); + std::iota(r.data().begin(),r.data().end(), 1); + auto q = r; + t = std::move(r); + + BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) ); + BOOST_CHECK_EQUAL ( t.extents().at(1) , e.at(1) ); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); + BOOST_CHECK ( !t.empty() ); + BOOST_CHECK_NE ( t.data() , nullptr); + + for(auto j = 0ul; j < t.size(1); ++j){ + for(auto i = 0ul; i < t.size(0); ++i){ + BOOST_CHECK_EQUAL( t.at(i,j), q(i,j) ); + } + } + }; + + for_each_in_tuple(extents,check); } @@ -267,36 +268,35 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_move_assignment, value, te BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_move_assignment, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using vector_type = typename tensor_type::vector_type; + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; - auto check = [](auto const&, auto& e) { - using extents_type = std::decay_t; - using etensor_type = ublas::fixed_rank_tensor; - - assert(e.size() == 2); - auto t = etensor_type{}; - auto r = vector_type(e[0]*e[1]); - std::iota(r.data().begin(),r.data().end(), 1); - auto q = r; - t = std::move(r); - - BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) * e.at(1)); - BOOST_CHECK_EQUAL ( t.extents().at(1) , 1); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); - BOOST_CHECK ( !t.empty() ); - BOOST_CHECK_NE ( t.data() , nullptr); - - for(auto i = 0ul; i < t.size(); ++i){ - BOOST_CHECK_EQUAL( t[i], q(i) ); - } - }; + auto check = [](auto const& /*unused*/, auto& e) { + constexpr auto size = std::tuple_size_v>; + using tensor_type = ublas::tensor_static_rank; + using vector_type = typename tensor_type::vector_type; - for_each_tuple(extents,check); + assert(ublas::size(e) == 2); + auto t = tensor_type{e[1],e[0]}; + auto r = vector_type(e[0]*e[1]); + std::iota(r.data().begin(),r.data().end(), 1); + auto q = r; + t = std::move(r); + + BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) * e.at(1)); + BOOST_CHECK_EQUAL ( t.extents().at(1) , 1); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); + BOOST_CHECK ( !t.empty() ); + BOOST_CHECK_NE ( t.data() , nullptr); + + for(auto i = 0ul; i < t.size(); ++i){ + BOOST_CHECK_EQUAL( t[i], q(i) ); + } + }; + + for_each_in_tuple(extents,check); } @@ -305,57 +305,57 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_move_assignment, value, te BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_expressions, value, test_types, 
fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using matrix_type = typename tensor_type::matrix_type; + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; - auto check = [](auto const&, auto& e) { - using extents_type = std::decay_t; - using etensor_type = ublas::fixed_rank_tensor; - - assert(e.size() == 2); - auto t = etensor_type{}; - auto r = matrix_type(e[0],e[1]); - std::iota(r.data().begin(),r.data().end(), 1); - t = r + 3*r; - etensor_type s = r + 3*r; - etensor_type q = s + r + 3*r + s; // + 3*r - - - BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) ); - BOOST_CHECK_EQUAL ( t.extents().at(1) , e.at(1) ); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); - BOOST_CHECK ( !t.empty() ); - BOOST_CHECK_NE ( t.data() , nullptr); - - BOOST_CHECK_EQUAL ( s.extents().at(0) , e.at(0) ); - BOOST_CHECK_EQUAL ( s.extents().at(1) , e.at(1) ); - BOOST_CHECK_EQUAL ( s.size() , product(e) ); - BOOST_CHECK_EQUAL ( s.rank() , e.size() ); - BOOST_CHECK ( !s.empty() ); - BOOST_CHECK_NE ( s.data() , nullptr); - - BOOST_CHECK_EQUAL ( q.extents().at(0) , e.at(0) ); - BOOST_CHECK_EQUAL ( q.extents().at(1) , e.at(1) ); - BOOST_CHECK_EQUAL ( q.size() , product(e) ); - BOOST_CHECK_EQUAL ( q.rank() , e.size() ); - BOOST_CHECK ( !q.empty() ); - BOOST_CHECK_NE ( q.data() , nullptr); - - - for(auto j = 0ul; j < t.size(1); ++j){ - for(auto i = 0ul; i < t.size(0); ++i){ - BOOST_CHECK_EQUAL( t.at(i,j), 4*r(i,j) ); - BOOST_CHECK_EQUAL( s.at(i,j), t.at(i,j) ); - BOOST_CHECK_EQUAL( q.at(i,j), 3*s.at(i,j) ); - } - } - }; - for_each_tuple(extents,check); + auto check = [](auto const& /*unused*/, auto& e) { + constexpr auto size = std::tuple_size_v>; + using tensor_type = ublas::tensor_static_rank; + using matrix_type = typename tensor_type::matrix_type; + + assert(ublas::size(e) == 2); + auto t = tensor_type{e[1],e[0]}; + auto r = matrix_type(e[0],e[1]); + std::iota(r.data().begin(),r.data().end(), 1); + t = r + 3*r; + tensor_type s = r + 3*r; + tensor_type q = s + r + 3*r + s; // + 3*r + + + BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) ); + BOOST_CHECK_EQUAL ( t.extents().at(1) , e.at(1) ); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); + BOOST_CHECK ( !t.empty() ); + BOOST_CHECK_NE ( t.data() , nullptr); + + BOOST_CHECK_EQUAL ( s.extents().at(0) , e.at(0) ); + BOOST_CHECK_EQUAL ( s.extents().at(1) , e.at(1) ); + BOOST_CHECK_EQUAL ( s.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( s.rank() , ublas::size (e) ); + BOOST_CHECK ( !s.empty() ); + BOOST_CHECK_NE ( s.data() , nullptr); + + BOOST_CHECK_EQUAL ( q.extents().at(0) , e.at(0) ); + BOOST_CHECK_EQUAL ( q.extents().at(1) , e.at(1) ); + BOOST_CHECK_EQUAL ( q.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( q.rank() , ublas::size (e) ); + BOOST_CHECK ( !q.empty() ); + BOOST_CHECK_NE ( q.data() , nullptr); + + + for(auto j = 0ul; j < t.size(1); ++j){ + for(auto i = 0ul; i < t.size(0); ++i){ + BOOST_CHECK_EQUAL( t.at(i,j), 4*r(i,j) ); + BOOST_CHECK_EQUAL( s.at(i,j), t.at(i,j) ); + BOOST_CHECK_EQUAL( q.at(i,j), 3*s.at(i,j) ); + } + } + }; + + for_each_in_tuple(extents,check); } @@ -365,106 +365,107 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_expressions, value, test_t BOOST_FIXTURE_TEST_CASE_TEMPLATE( 
test_tensor_vector_expressions, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using vector_type = typename tensor_type::vector_type; + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; - auto check = [](auto const&, auto& e) { - using extents_type = std::decay_t; - using etensor_type = ublas::fixed_rank_tensor; - - assert(e.size() == 2); - auto t = etensor_type{}; - auto r = vector_type(e[0]*e[1]); - std::iota(r.data().begin(),r.data().end(), 1); - t = r + 3*r; - etensor_type s = r + 3*r; - etensor_type q = s + r + 3*r + s; // + 3*r - - - BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0)*e.at(1) ); - BOOST_CHECK_EQUAL ( t.extents().at(1) , 1); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); - BOOST_CHECK ( !t.empty() ); - BOOST_CHECK_NE ( t.data() , nullptr); - - BOOST_CHECK_EQUAL ( s.extents().at(0) , e.at(0)*e.at(1) ); - BOOST_CHECK_EQUAL ( s.extents().at(1) , 1); - BOOST_CHECK_EQUAL ( s.size() , product(e) ); - BOOST_CHECK_EQUAL ( s.rank() , e.size() ); - BOOST_CHECK ( !s.empty() ); - BOOST_CHECK_NE ( s.data() , nullptr); - - BOOST_CHECK_EQUAL ( q.extents().at(0) , e.at(0)*e.at(1) ); - BOOST_CHECK_EQUAL ( q.extents().at(1) , 1); - BOOST_CHECK_EQUAL ( q.size() , product(e) ); - BOOST_CHECK_EQUAL ( q.rank() , e.size() ); - BOOST_CHECK ( !q.empty() ); - BOOST_CHECK_NE ( q.data() , nullptr); - - - - for(auto i = 0ul; i < t.size(); ++i){ - BOOST_CHECK_EQUAL( t.at(i), 4*r(i) ); - BOOST_CHECK_EQUAL( s.at(i), t.at(i) ); - BOOST_CHECK_EQUAL( q.at(i), 3*s.at(i) ); - } - }; + auto check = [](auto const& /*unused*/, auto& e) { + constexpr auto size = std::tuple_size_v>; + using tensor_type = ublas::tensor_static_rank; + using vector_type = typename tensor_type::vector_type; - for_each_tuple(extents,check); + assert(ublas::size(e) == 2); + auto t = tensor_type{e[1],e[0]}; + auto r = vector_type(e[0]*e[1]); + std::iota(r.data().begin(),r.data().end(), 1); + t = r + 3*r; + tensor_type s = r + 3*r; + tensor_type q = s + r + 3*r + s; // + 3*r + + + BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0)*e.at(1) ); + BOOST_CHECK_EQUAL ( t.extents().at(1) , 1); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); + BOOST_CHECK ( !t.empty() ); + BOOST_CHECK_NE ( t.data() , nullptr); + + BOOST_CHECK_EQUAL ( s.extents().at(0) , e.at(0)*e.at(1) ); + BOOST_CHECK_EQUAL ( s.extents().at(1) , 1); + BOOST_CHECK_EQUAL ( s.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( s.rank() , ublas::size (e) ); + BOOST_CHECK ( !s.empty() ); + BOOST_CHECK_NE ( s.data() , nullptr); + + BOOST_CHECK_EQUAL ( q.extents().at(0) , e.at(0)*e.at(1) ); + BOOST_CHECK_EQUAL ( q.extents().at(1) , 1); + BOOST_CHECK_EQUAL ( q.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( q.rank() , ublas::size (e) ); + BOOST_CHECK ( !q.empty() ); + BOOST_CHECK_NE ( q.data() , nullptr); + + + + for(auto i = 0ul; i < t.size(); ++i){ + BOOST_CHECK_EQUAL( t.at(i), 4*r(i) ); + BOOST_CHECK_EQUAL( s.at(i), t.at(i) ); + BOOST_CHECK_EQUAL( q.at(i), 3*s.at(i) ); + } + }; + + for_each_in_tuple(extents,check); } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_vector_expressions, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_vector_expressions, pair, test_types, fixture ) { - using 
namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using matrix_type = typename tensor_type::matrix_type; - using vector_type = typename tensor_type::vector_type; - - auto check = [](auto const&, auto& e) { - using extents_type = std::decay_t; - using etensor_type = ublas::fixed_rank_tensor; - - if(product(e) <= 2) - return; - assert(e.size() == 2); - auto Q = etensor_type{e[0],1}; - auto A = matrix_type(e[0],e[1]); - auto b = vector_type(e[1]); - auto c = vector_type(e[0]); - std::iota(b.data().begin(),b.data().end(), 1); - std::fill(A.data().begin(),A.data().end(), 1); - std::fill(c.data().begin(),c.data().end(), 2); - std::fill(Q.begin(),Q.end(), 2); - - etensor_type T = Q + (ublas::prod(A , b) + 2*c) + 3*Q; - - BOOST_CHECK_EQUAL ( T.extents().at(0) , Q.extents().at(0) ); - BOOST_CHECK_EQUAL ( T.extents().at(1) , Q.extents().at(1)); - BOOST_CHECK_EQUAL ( T.size() , Q.size() ); - BOOST_CHECK_EQUAL ( T.size() , c.size() ); - BOOST_CHECK_EQUAL ( T.rank() , Q.rank() ); - BOOST_CHECK ( !T.empty() ); - BOOST_CHECK_NE ( T.data() , nullptr); - - for(auto i = 0ul; i < T.size(); ++i){ - auto n = e[1]; - auto ab = n * (n+1) / 2; - BOOST_CHECK_EQUAL( T(i), ab+4*Q(0)+2*c(0) ); - } - - }; - - - - for_each_tuple(extents,check); + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + + + auto check = [](auto const& /*unused*/, auto& e) { + constexpr auto size = std::tuple_size_v>; + using tensor = ublas::tensor_static_rank; + using matrix = typename tensor::matrix_type; + using vector = typename tensor::vector_type; + + if(product(e) <= 2) + return; + assert(ublas::size(e) == 2); + auto Q = tensor{e[0],1}; + auto A = matrix(e[0],e[1]); + auto b = vector(e[1]); + auto c = vector(e[0]); + std::iota(b.data().begin(),b.data().end(), 1); + std::fill(A.data().begin(),A.data().end(), 1); + std::fill(c.data().begin(),c.data().end(), 2); + std::fill(Q.begin(),Q.end(), 2); + + tensor T = Q + (ublas::prod(A , b) + 2*c) + 3*Q; + + BOOST_CHECK_EQUAL ( T.extents().at(0) , Q.extents().at(0) ); + BOOST_CHECK_EQUAL ( T.extents().at(1) , Q.extents().at(1)); + BOOST_CHECK_EQUAL ( T.size() , Q.size() ); + BOOST_CHECK_EQUAL ( T.size() , c.size() ); + BOOST_CHECK_EQUAL ( T.rank() , Q.rank() ); + BOOST_CHECK ( !T.empty() ); + BOOST_CHECK_NE ( T.data() , nullptr); + + const auto n = e[1]; + const auto ab = value(std::div(n*(n+1),2).quot); + const auto ref = ab+4*Q(0)+2*c(0); + BOOST_CHECK( std::all_of(T.begin(),T.end(), [ref](auto cc){ return ref == cc; }) ); + +// for(auto i = 0ul; i < T.size(); ++i){ +// auto n = e[1]; +// auto ab = n * (n+1) / 2; +// BOOST_CHECK_EQUAL( T(i), ab+4*Q(0)+2*c(0) ); +// } + + }; + for_each_in_tuple(extents,check); } diff --git a/test/tensor/test_functions.cpp b/test/tensor/test_functions.cpp index e1d32bef0..c5a55048c 100644 --- a/test/tensor/test_functions.cpp +++ b/test/tensor/test_functions.cpp @@ -22,7 +22,6 @@ #include "utility.hpp" -// BOOST_AUTO_TEST_SUITE ( test_tensor_functions, * boost::unit_test::depends_on("test_tensor_contraction") ) BOOST_AUTO_TEST_SUITE ( test_tensor_functions) @@ -52,10 +51,10 @@ struct fixture BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_prod_vector, value, test_types, fixture ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using 
tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; using vector_type = typename tensor_type::vector_type; @@ -63,7 +62,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_prod_vector, value, test_types, f auto a = tensor_type(n, value_type{2}); - for(auto m = 0u; m < n.size(); ++m){ + for(auto m = 0u; m < ublas::size(n); ++m){ auto b = vector_type (n[m], value_type{1} ); @@ -78,30 +77,28 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_prod_vector, value, test_types, f auto a = tensor_type(n, value_type{2}); auto b = vector_type(n[0], value_type{1}); - auto zero_rank_empty_tensor = tensor_type{}; auto empty = vector_type{}; BOOST_CHECK_THROW(prod(a, b, 0), std::length_error); BOOST_CHECK_THROW(prod(a, b, 9), std::length_error); - BOOST_CHECK_THROW(prod(zero_rank_empty_tensor, b, 1), std::length_error); BOOST_CHECK_THROW(prod(a, empty, 2), std::length_error); } BOOST_AUTO_TEST_CASE( test_tensor_prod_vector_exception ) { - using namespace boost::numeric; - using value_type = float; - using layout_type = ublas::layout::first_order; - using d_tensor_type = ublas::dynamic_tensor; - using vector_type = typename d_tensor_type::vector_type; - - auto t1 = d_tensor_type{ublas::extents<>{},1.f}; - auto v1 = vector_type{3,value_type{1}}; - - BOOST_REQUIRE_THROW(prod(t1,v1,0),std::length_error); - BOOST_REQUIRE_THROW(prod(t1,v1,1),std::length_error); - BOOST_REQUIRE_THROW(prod(t1,v1,3),std::length_error); +// namespace ublas = boost::numeric::ublas; +// using value_type = float; +// using layout_type = ublas::layout::first_order; +// using d_tensor_type = ublas::tensor_dynamic; +// using vector_type = typename d_tensor_type::vector_type; + +// auto t1 = d_tensor_type{ublas::extents<>{},1.f}; +// auto v1 = vector_type{3,value_type{1}}; + +// BOOST_REQUIRE_THROW(prod(t1,v1,0),std::length_error); +// BOOST_REQUIRE_THROW(prod(t1,v1,1),std::length_error); +// BOOST_REQUIRE_THROW(prod(t1,v1,3),std::length_error); } @@ -109,10 +106,10 @@ BOOST_AUTO_TEST_CASE( test_tensor_prod_vector_exception ) BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_prod_matrix, value, test_types, fixture ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; using matrix_type = typename tensor_type::matrix_type; @@ -120,7 +117,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_prod_matrix, value, test_types, f auto a = tensor_type(n, value_type{2}); - for(auto m = 0u; m < n.size(); ++m){ + for(auto m = 0u; m < ublas::size(n); ++m){ auto b = matrix_type ( n[m], n[m], value_type{1} ); @@ -136,39 +133,37 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_prod_matrix, value, test_types, f auto a = tensor_type(n, value_type{2}); auto b = matrix_type(n[0], n[0], value_type{1}); - auto zero_rank_empty_tensor = tensor_type{}; auto empty = matrix_type{}; BOOST_CHECK_THROW(prod(a, b, 0), std::length_error); BOOST_CHECK_THROW(prod(a, b, 9), std::length_error); - BOOST_CHECK_THROW(prod(zero_rank_empty_tensor, b, 1), std::length_error); - BOOST_CHECK_THROW(prod(a, empty, 2), std::length_error); + BOOST_CHECK_THROW(prod(a, empty, 2), std::invalid_argument); } BOOST_AUTO_TEST_CASE( test_tensor_prod_matrix_exception ) { - using namespace boost::numeric; - using value_type = float; - using layout_type = ublas::layout::first_order; - using d_extents_type = ublas::extents<>; - using d_tensor_type = 
ublas::dynamic_tensor; - using matrix_type = typename d_tensor_type::matrix_type; +// namespace ublas = boost::numeric::ublas; +// using value_type = float; +// using layout_type = ublas::layout::first_order; +// using d_extents_type = ublas::extents<>; +// using d_tensor_type = ublas::tensor_dynamic; +// using matrix_type = typename d_tensor_type::matrix_type; - auto t1 = d_tensor_type{d_extents_type{},1.f}; - auto m1 = matrix_type{3,3,value_type{1}}; +// auto t1 = d_tensor_type{d_extents_type{},1.f}; +// auto m1 = matrix_type{3,3,value_type{1}}; - BOOST_REQUIRE_THROW(prod(t1,m1,0),std::length_error); - BOOST_REQUIRE_THROW(prod(t1,m1,1),std::length_error); - BOOST_REQUIRE_THROW(prod(t1,m1,3),std::length_error); +// BOOST_REQUIRE_THROW(prod(t1,m1,0),std::length_error); +// BOOST_REQUIRE_THROW(prod(t1,m1,1),std::length_error); +// BOOST_REQUIRE_THROW(prod(t1,m1,3),std::length_error); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_prod_tensor_1, value, test_types, fixture ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; // left-hand and right-hand side have the // the same number of elements @@ -202,13 +197,13 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_prod_tensor_1, value, test_types, BOOST_AUTO_TEST_CASE( test_tensor_prod_tensor_1_exception ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = float; using layout_type = ublas::layout::first_order; using d_extents_type = ublas::extents<>; - using d_tensor_type = ublas::dynamic_tensor; + using d_tensor_type = ublas::tensor_dynamic; + - auto t1 = d_tensor_type{}; std::vector phia = {1,2,3}; std::vector phib = {1,2,3,4,5}; @@ -236,10 +231,11 @@ BOOST_AUTO_TEST_CASE( test_tensor_prod_tensor_1_exception ) BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_prod_tensor_2, value, test_types, fixture ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using layout_type = typename value::second_type; + using tensor_type = ublas::tensor_dynamic; + using extents_type = typename tensor_type::extents_type; auto compute_factorial = [](auto const& p){ @@ -250,11 +246,11 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_prod_tensor_2, value, test_types, }; auto permute_extents = [](auto const& pi, auto const& na){ - auto nb = na; - assert(pi.size() == na.size()); - for(auto j = 0u; j < pi.size(); ++j) - nb[pi[j]-1] = na[j]; - return nb; + auto nb_base = na.base(); + assert(pi.size() == ublas::size(na)); + for(auto j = 0u; j < pi.size(); ++j) + nb_base[pi[j]-1] = na[j]; + return extents_type(nb_base); }; @@ -304,22 +300,22 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_prod_tensor_2, value, test_types, auto phia = std::vector(3); auto sphia = std::vector(2); - BOOST_CHECK_THROW(ublas::prod(tensor_type{}, tensor_type({2,1,2}), phia, phia), std::runtime_error); - BOOST_CHECK_THROW(ublas::prod(tensor_type({1,2,3}), tensor_type(), phia, phia), std::runtime_error); - BOOST_CHECK_THROW(ublas::prod(tensor_type({1,2,4}), tensor_type({2,1}), phia, phia), std::runtime_error); - BOOST_CHECK_THROW(ublas::prod(tensor_type({1,2}), tensor_type({2,1,2}), phia, phia), std::runtime_error); - BOOST_CHECK_THROW(ublas::prod(tensor_type({1,2}), 
tensor_type({2,1,3}), sphia, phia), std::runtime_error); - BOOST_CHECK_THROW(ublas::prod(tensor_type({1,2}), tensor_type({2,2}), phia, sphia), std::runtime_error); - BOOST_CHECK_THROW(ublas::prod(tensor_type({1,2}), tensor_type({4,4}), sphia, phia), std::runtime_error); +// BOOST_CHECK_THROW(ublas::prod(tensor_type{}, tensor_type({2,1,2}), phia, phia), std::runtime_error); +// BOOST_CHECK_THROW(ublas::prod(tensor_type({1,2,3}), tensor_type(), phia, phia), std::runtime_error); + BOOST_CHECK_THROW(ublas::prod(tensor_type{1,2,4}, tensor_type{2,1}, phia, phia), std::runtime_error); + BOOST_CHECK_THROW(ublas::prod(tensor_type{1,2}, tensor_type{2,1,2}, phia, phia), std::runtime_error); + BOOST_CHECK_THROW(ublas::prod(tensor_type{1,2}, tensor_type{2,1,3}, sphia, phia), std::runtime_error); + BOOST_CHECK_THROW(ublas::prod(tensor_type{1,2}, tensor_type{2,2}, phia, sphia), std::runtime_error); + BOOST_CHECK_THROW(ublas::prod(tensor_type{1,2}, tensor_type{4,4}, sphia, phia), std::runtime_error); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_inner_prod, value, test_types, fixture ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; for(auto const& n : extents) { @@ -333,19 +329,19 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_inner_prod, value, test_types, fi BOOST_CHECK_EQUAL( c , r ); } - BOOST_CHECK_THROW(ublas::inner_prod(tensor_type({1,2,3}), tensor_type({1,2,3,4})), std::length_error); // rank different - BOOST_CHECK_THROW(ublas::inner_prod(tensor_type(), tensor_type()), std::length_error); //empty tensor - BOOST_CHECK_THROW(ublas::inner_prod(tensor_type({1,2,3}), tensor_type({3,2,1})), std::length_error); // different extent + BOOST_CHECK_THROW(ublas::inner_prod(tensor_type{1,2,3}, tensor_type{1,2,3,4}), std::length_error); // rank different +// BOOST_CHECK_THROW(ublas::inner_prod(tensor_type(), tensor_type()), std::length_error); //empty tensor + BOOST_CHECK_THROW(ublas::inner_prod(tensor_type{1,2,3}, tensor_type{3,2,1}), std::length_error); // different extent } BOOST_AUTO_TEST_CASE( test_tensor_inner_prod_exception ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = float; using layout_type = ublas::layout::first_order; using d_extents_type = ublas::extents<>; - using d_tensor_type = ublas::dynamic_tensor; + using d_tensor_type = ublas::tensor_dynamic; auto t1 = d_tensor_type{d_extents_type{1,2},1.f}; auto t2 = d_tensor_type{d_extents_type{1,2,3},1.f}; @@ -354,10 +350,10 @@ BOOST_AUTO_TEST_CASE( test_tensor_inner_prod_exception ) BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_norm, value, test_types, fixture ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; for(auto const& n : extents) { @@ -373,10 +369,10 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_norm, value, test_types, fixture auto c = ublas::inner_prod(a, a); auto r = std::inner_product(a.begin(),a.end(), a.begin(),value_type(0)); - tensor_type var = (a+a)/2.0f; // std::complex/int not allowed as expression is captured + tensor_type var = (a+a)/value_type(2); // std::complex/int not allowed as expression is captured auto r2 = ublas::norm( var ); - 
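// --- editor's annotation: illustrative sketch, not part of the patch ----------
// The norm check above relies on var being element-wise equal to a, since
// var = (a+a)/value_type(2); dividing by value_type(2) instead of 2.0f keeps the
// expression valid for complex and integral value types as well. The identity
// exercised is norm(a) == sqrt(inner_prod(a, a)). A minimal sketch with a
// dynamic tensor, assuming the parameter order tensor_dynamic<value, layout>.
// needs <boost/numeric/ublas/tensor.hpp>, <cassert>, <cmath>
void norm_matches_inner_prod()
{
  namespace ublas = boost::numeric::ublas;
  using tensor = ublas::tensor_dynamic<float, ublas::layout::first_order>;
  auto a   = tensor(ublas::extents<>{2, 3}, 2.0f);   // six elements, all equal to 2
  auto c   = ublas::inner_prod(a, a);                // 6 * 2 * 2 == 24
  tensor var = (a + a) / 2.0f;                       // element-wise equal to a
  assert(std::abs(ublas::norm(var) - std::sqrt(c)) < 1e-6f);
}
// -------------------------------------------------------------------------------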
BOOST_CHECK_THROW(ublas::norm(tensor_type{}), std::runtime_error); +// BOOST_CHECK_THROW(ublas::norm(tensor_type{}), std::runtime_error); BOOST_CHECK_EQUAL( c , r ); BOOST_CHECK_EQUAL( std::sqrt( c ) , r2 ); @@ -386,13 +382,13 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_norm, value, test_types, fixture BOOST_FIXTURE_TEST_CASE( test_tensor_real_imag_conj, fixture ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = float; using complex_type = std::complex; using layout_type = ublas::layout::first_order; - using tensor_complex_type = ublas::dynamic_tensor; - using tensor_type = ublas::dynamic_tensor; + using tensor_complex_type = ublas::tensor_dynamic; + using tensor_type = ublas::tensor_dynamic; for(auto const& n : extents) { @@ -460,10 +456,10 @@ BOOST_FIXTURE_TEST_CASE( test_tensor_real_imag_conj, fixture ) BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_outer_prod, value, test_types, fixture ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; for(auto const& n1 : extents) { auto a = tensor_type(n1, value_type(2)); @@ -500,10 +496,10 @@ void init(std::vector>& a) BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_trans, value, test_types, fixture ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; auto fak = [](auto const& p){ auto f = 1ul; @@ -521,8 +517,8 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_trans, value, test_types, fixture for(auto const& n : extents) { - auto const p = n.size(); - auto const s = product(n); + auto const p = ublas::size(n); + auto const s = ublas::product(n); auto aref = tensor_type(n); auto v = value_type{}; for(auto i = 0u; i < s; ++i, v+=1) diff --git a/test/tensor/test_multi_index.cpp b/test/tensor/test_multi_index.cpp index 63289e52f..fdbbac7de 100644 --- a/test/tensor/test_multi_index.cpp +++ b/test/tensor/test_multi_index.cpp @@ -29,46 +29,46 @@ using test_types = zip>::with_t ind(_a, _b); + ublas::multi_index<2> ind(i::_a, i::_b); BOOST_CHECK_EQUAL ( get<0>( ind ), 1 ) ; BOOST_CHECK_EQUAL ( get<1>( ind ), 2 ) ; @@ -76,7 +76,7 @@ BOOST_AUTO_TEST_CASE ( test_multi_index_class_construction ) { - multi_index<2> ind(_d,_c); + ublas::multi_index<2> ind(i::_d,i::_c); BOOST_CHECK_EQUAL ( ind[0] , 4 ) ; BOOST_CHECK_EQUAL ( ind[1] , 3 ) ; @@ -86,58 +86,59 @@ BOOST_AUTO_TEST_CASE ( test_multi_index_class_construction ) BOOST_AUTO_TEST_CASE_TEMPLATE( test_tensor_multi_index_class_generation, value, test_types ) { - using namespace boost::numeric::ublas; + namespace ublas = boost::numeric::ublas; + namespace i = ublas::index; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; auto t = std::make_tuple ( - index::_a, // 0 - index::_b, // 1 - index::_c, // 2 - index::_d, // 3 - index::_e // 4 + i::_a, // 0 + i::_b, // 1 + i::_c, // 2 + i::_d, // 3 + i::_e // 4 ); { - auto a = tensor_type(extents<>{2,3}, value_type{2}); + auto a = tensor_type(ublas::extents<>{2,3}, value_type{2}); auto a_ind = a( std::get<0>(t), std::get<2>(t) ); BOOST_CHECK_EQUAL ( std::addressof( 
a_ind.first ), std::addressof( a ) ) ; - BOOST_CHECK_EQUAL (std::get<0>(a_ind.second)(), index::_a() ) ; - BOOST_CHECK_EQUAL (std::get<1>(a_ind.second)(), index::_c() ) ; + BOOST_CHECK_EQUAL (std::get<0>(a_ind.second)(), i::_a() ) ; + BOOST_CHECK_EQUAL (std::get<1>(a_ind.second)(), i::_c() ) ; } { - auto a = tensor_type(extents<>{2,3}, value_type{2}); + auto a = tensor_type(ublas::extents<>{2,3}, value_type{2}); auto a_ind = a( std::get<2>(t), std::get<0>(t) ); BOOST_CHECK_EQUAL ( std::addressof( a_ind.first ), std::addressof( a ) ) ; - BOOST_CHECK_EQUAL (std::get<0>(a_ind.second)(), index::_c() ) ; - BOOST_CHECK_EQUAL (std::get<1>(a_ind.second)(), index::_a() ) ; + BOOST_CHECK_EQUAL (std::get<0>(a_ind.second)(), i::_c() ) ; + BOOST_CHECK_EQUAL (std::get<1>(a_ind.second)(), i::_a() ) ; } { - auto a = tensor_type(extents<>{2,3}, value_type{2}); + auto a = tensor_type(ublas::extents<>{2,3}, value_type{2}); auto a_ind = a( std::get<2>(t), std::get<3>(t) ); BOOST_CHECK_EQUAL (std::addressof( a_ind.first ), std::addressof( a ) ) ; - BOOST_CHECK_EQUAL (std::get<0>(a_ind.second)(), index::_c() ) ; - BOOST_CHECK_EQUAL (std::get<1>(a_ind.second)(), index::_d() ) ; + BOOST_CHECK_EQUAL (std::get<0>(a_ind.second)(), i::_c() ) ; + BOOST_CHECK_EQUAL (std::get<1>(a_ind.second)(), i::_d() ) ; } { - auto a = tensor_type(extents<>{2,3,4}, value_type{2}); + auto a = tensor_type(ublas::extents<>{2,3,4}, value_type{2}); auto a_ind = a( std::get<2>(t), std::get<3>(t), std::get<0>(t) ); BOOST_CHECK_EQUAL (std::addressof( a_ind.first ), std::addressof( a ) ) ; - BOOST_CHECK_EQUAL (std::get<0>(a_ind.second)(), index::_c() ) ; - BOOST_CHECK_EQUAL (std::get<1>(a_ind.second)(), index::_d() ) ; - BOOST_CHECK_EQUAL (std::get<2>(a_ind.second)(), index::_a() ) ; + BOOST_CHECK_EQUAL (std::get<0>(a_ind.second)(), i::_c() ) ; + BOOST_CHECK_EQUAL (std::get<1>(a_ind.second)(), i::_d() ) ; + BOOST_CHECK_EQUAL (std::get<2>(a_ind.second)(), i::_a() ) ; } } diff --git a/test/tensor/test_multi_index_utility.cpp b/test/tensor/test_multi_index_utility.cpp index a4ed17e1c..3cd35de9a 100644 --- a/test/tensor/test_multi_index_utility.cpp +++ b/test/tensor/test_multi_index_utility.cpp @@ -19,40 +19,40 @@ BOOST_AUTO_TEST_SUITE ( test_multi_index_utility ) BOOST_AUTO_TEST_CASE ( test_multi_index_has_index ) { - using namespace boost::numeric::ublas; - using namespace boost::numeric::ublas::index; + namespace ublas = boost::numeric::ublas; + namespace i = boost::numeric::ublas::index; { constexpr auto tuple = std::tuple<>{}; - constexpr auto has_a = has_index::value; - constexpr auto has_b = has_index::value; + constexpr auto has_a = ublas::has_index::value; + constexpr auto has_b = ublas::has_index::value; BOOST_CHECK( !has_a ); BOOST_CHECK( !has_b ); } { - constexpr auto tuple = std::make_tuple(_a); - constexpr auto has_a = has_index::value; - constexpr auto has_b = has_index::value; + constexpr auto tuple = std::make_tuple(i::_a); + constexpr auto has_a = ublas::has_index::value; + constexpr auto has_b = ublas::has_index::value; BOOST_CHECK( has_a ); BOOST_CHECK( !has_b ); } { - constexpr auto tuple = std::make_tuple(_a,_b,_,_c,_d); - constexpr auto has_a = has_index::value; - constexpr auto has_b = has_index::value; - constexpr auto has_c = has_index::value; - constexpr auto has_d = has_index::value; - constexpr auto has_e = has_index::value; - constexpr auto has__ = has_index::value; + constexpr auto tuple = std::make_tuple(i::_a,i::_b,i::_,i::_c,i::_d); + constexpr auto has_a = ublas::has_index::value; + constexpr auto has_b = 
ublas::has_index::value; + constexpr auto has_c = ublas::has_index::value; + constexpr auto has_d = ublas::has_index::value; + constexpr auto has_e = ublas::has_index::value; + constexpr auto has = ublas::has_index::value; BOOST_CHECK( has_a ); BOOST_CHECK( has_b ); BOOST_CHECK( has_c ); BOOST_CHECK( has_d ); BOOST_CHECK( !has_e ); - BOOST_CHECK( has__ ); + BOOST_CHECK( has ); } } @@ -60,55 +60,55 @@ BOOST_AUTO_TEST_CASE ( test_multi_index_has_index ) BOOST_AUTO_TEST_CASE ( test_multi_index_valid ) { - using namespace boost::numeric::ublas; - using namespace boost::numeric::ublas::index; + namespace ublas = boost::numeric::ublas; + namespace i = ublas::index; { constexpr auto tuple = std::tuple<>{}; - constexpr auto valid = valid_multi_index::value; + constexpr auto valid = ublas::valid_multi_index::value; BOOST_CHECK( valid ); } { - constexpr auto tuple = std::make_tuple(_a); - constexpr auto valid = valid_multi_index::value; + constexpr auto tuple = std::make_tuple(i::_a); + constexpr auto valid = ublas::valid_multi_index::value; BOOST_CHECK( valid ); } { - constexpr auto tuple = std::make_tuple(_a,_,_b); - constexpr auto valid = valid_multi_index::value; + constexpr auto tuple = std::make_tuple(i::_a,i::_,i::_b); + constexpr auto valid = ublas::valid_multi_index::value; BOOST_CHECK( valid ); } { - constexpr auto tuple = std::make_tuple(_a,_,_b,_b); - constexpr auto valid = valid_multi_index::value; + constexpr auto tuple = std::make_tuple(i::_a,i::_,i::_b,i::_b); + constexpr auto valid = ublas::valid_multi_index::value; BOOST_CHECK( !valid ); } { - constexpr auto tuple = std::make_tuple(_c,_a,_,_b,_b); - constexpr auto valid = valid_multi_index::value; + constexpr auto tuple = std::make_tuple(i::_c,i::_a,i::_,i::_b,i::_b); + constexpr auto valid = ublas::valid_multi_index::value; BOOST_CHECK( !valid ); } { - constexpr auto tuple = std::make_tuple(_c,_a,_,_b); - constexpr auto valid = valid_multi_index::value; + constexpr auto tuple = std::make_tuple(i::_c,i::_a,i::_,i::_b); + constexpr auto valid = ublas::valid_multi_index::value; BOOST_CHECK( valid ); } { - constexpr auto tuple = std::make_tuple(_,_c,_a,_,_b); - constexpr auto valid = valid_multi_index::value; + constexpr auto tuple = std::make_tuple(i::_,i::_c,i::_a,i::_,i::_b); + constexpr auto valid = ublas::valid_multi_index::value; BOOST_CHECK( valid ); } { - constexpr auto tuple = std::make_tuple(_,_c,_a,_,_b,_); - constexpr auto valid = valid_multi_index::value; + constexpr auto tuple = std::make_tuple(i::_,i::_c,i::_a,i::_,i::_b,i::_); + constexpr auto valid = ublas::valid_multi_index::value; BOOST_CHECK( valid ); } } @@ -119,136 +119,136 @@ BOOST_AUTO_TEST_CASE ( test_multi_index_valid ) BOOST_AUTO_TEST_CASE ( test_multi_index_number_equal_indices ) { - using namespace boost::numeric::ublas; - using namespace boost::numeric::ublas::index; + namespace ublas = boost::numeric::ublas; + namespace i = ublas::index; { constexpr auto lhs = std::tuple<>{}; constexpr auto rhs = std::tuple<>{}; - constexpr auto num = number_equal_indexes::value; + constexpr auto num = ublas::number_equal_indexes::value; BOOST_CHECK_EQUAL( num, 0 ); } { - constexpr auto lhs = std::make_tuple(_a); + constexpr auto lhs = std::make_tuple(i::_a); constexpr auto rhs = std::tuple<>{}; - constexpr auto num = number_equal_indexes::value; + constexpr auto num = ublas::number_equal_indexes::value; BOOST_CHECK_EQUAL( num, 0 ); } { constexpr auto lhs = std::tuple<>{}; - constexpr auto rhs = std::make_tuple(_a); - constexpr auto num = number_equal_indexes::value; + 
constexpr auto rhs = std::make_tuple(i::_a); + constexpr auto num = ublas::number_equal_indexes::value; BOOST_CHECK_EQUAL( num, 0 ); } { - constexpr auto lhs = std::make_tuple(_b); - constexpr auto rhs = std::make_tuple(_a); - constexpr auto num = number_equal_indexes::value; + constexpr auto lhs = std::make_tuple(i::_b); + constexpr auto rhs = std::make_tuple(i::_a); + constexpr auto num = ublas::number_equal_indexes::value; BOOST_CHECK_EQUAL( num, 0 ); } { - constexpr auto lhs = std::make_tuple(_a); - constexpr auto rhs = std::make_tuple(_a); - constexpr auto num = number_equal_indexes::value; + constexpr auto lhs = std::make_tuple(i::_a); + constexpr auto rhs = std::make_tuple(i::_a); + constexpr auto num = ublas::number_equal_indexes::value; BOOST_CHECK_EQUAL( num, 1 ); } { - constexpr auto lhs = std::make_tuple(_a,_b); - constexpr auto rhs = std::make_tuple(_a); - constexpr auto num = number_equal_indexes::value; + constexpr auto lhs = std::make_tuple(i::_a,i::_b); + constexpr auto rhs = std::make_tuple(i::_a); + constexpr auto num = ublas::number_equal_indexes::value; BOOST_CHECK_EQUAL( num, 1 ); } { - constexpr auto lhs = std::make_tuple(_b); - constexpr auto rhs = std::make_tuple(_a,_b); - constexpr auto num = number_equal_indexes::value; + constexpr auto lhs = std::make_tuple(i::_b); + constexpr auto rhs = std::make_tuple(i::_a,i::_b); + constexpr auto num = ublas::number_equal_indexes::value; BOOST_CHECK_EQUAL( num, 1 ); } { - constexpr auto lhs = std::make_tuple(_a); - constexpr auto rhs = std::make_tuple(_a); - constexpr auto num = number_equal_indexes::value; + constexpr auto lhs = std::make_tuple(i::_a); + constexpr auto rhs = std::make_tuple(i::_a); + constexpr auto num = ublas::number_equal_indexes::value; BOOST_CHECK_EQUAL( num, 1 ); } { - constexpr auto lhs = std::make_tuple(_a,_b); - constexpr auto rhs = std::make_tuple(_a,_b); - constexpr auto num = number_equal_indexes::value; + constexpr auto lhs = std::make_tuple(i::_a,i::_b); + constexpr auto rhs = std::make_tuple(i::_a,i::_b); + constexpr auto num = ublas::number_equal_indexes::value; BOOST_CHECK_EQUAL( num, 2 ); } { - constexpr auto lhs = std::make_tuple(_b,_a); - constexpr auto rhs = std::make_tuple(_a,_b); - constexpr auto num = number_equal_indexes::value; + constexpr auto lhs = std::make_tuple(i::_b,i::_a); + constexpr auto rhs = std::make_tuple(i::_a,i::_b); + constexpr auto num = ublas::number_equal_indexes::value; BOOST_CHECK_EQUAL( num, 2 ); } { - constexpr auto lhs = std::make_tuple(_b,_a,_c); - constexpr auto rhs = std::make_tuple(_a,_b); - constexpr auto num = number_equal_indexes::value; + constexpr auto lhs = std::make_tuple(i::_b,i::_a,i::_c); + constexpr auto rhs = std::make_tuple(i::_a,i::_b); + constexpr auto num = ublas::number_equal_indexes::value; BOOST_CHECK_EQUAL( num, 2 ); } { - constexpr auto lhs = std::make_tuple(_b,_a,_c); - constexpr auto rhs = std::make_tuple(_a,_b,_d); - constexpr auto num = number_equal_indexes::value; + constexpr auto lhs = std::make_tuple(i::_b,i::_a,i::_c); + constexpr auto rhs = std::make_tuple(i::_a,i::_b,i::_d); + constexpr auto num = ublas::number_equal_indexes::value; BOOST_CHECK_EQUAL( num, 2 ); } { - constexpr auto lhs = std::make_tuple(_b,_a,_d); - constexpr auto rhs = std::make_tuple(_a,_b,_d); - constexpr auto num = number_equal_indexes::value; + constexpr auto lhs = std::make_tuple(i::_b,i::_a,i::_d); + constexpr auto rhs = std::make_tuple(i::_a,i::_b,i::_d); + constexpr auto num = ublas::number_equal_indexes::value; BOOST_CHECK_EQUAL( num, 3 ); } { - 
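// --- editor's annotation: illustrative sketch, not part of the patch ----------
// As the cases in this test show, number_equal_indexes counts the index symbols
// that occur in both tuples, and the anonymous placeholder i::_ is never
// counted even when it appears on both sides. A compile-time usage sketch,
// assuming the trait takes the two (decayed) tuple types as its arguments.
// needs <tuple> and <type_traits> in addition to the multi_index headers
constexpr auto lhs_x = std::make_tuple(i::_a, i::_b, i::_);
constexpr auto rhs_x = std::make_tuple(i::_b, i::_,  i::_c);
static_assert(ublas::number_equal_indexes<std::decay_t<decltype(lhs_x)>,
                                          std::decay_t<decltype(rhs_x)>>::value == 1,
              "only _b is shared; the placeholder _ never counts");
// -------------------------------------------------------------------------------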
constexpr auto lhs = std::make_tuple(_b,_a,_d); - constexpr auto rhs = std::make_tuple(_a,_b,_d,_); - constexpr auto num = number_equal_indexes::value; + constexpr auto lhs = std::make_tuple(i::_b,i::_a,i::_d); + constexpr auto rhs = std::make_tuple(i::_a,i::_b,i::_d,i::_); + constexpr auto num = ublas::number_equal_indexes::value; BOOST_CHECK_EQUAL( num, 3 ); } { - constexpr auto lhs = std::make_tuple(_b,_a,_d,_); - constexpr auto rhs = std::make_tuple(_a,_b,_d,_); - constexpr auto num = number_equal_indexes::value; + constexpr auto lhs = std::make_tuple(i::_b,i::_a,i::_d,i::_); + constexpr auto rhs = std::make_tuple(i::_a,i::_b,i::_d,i::_); + constexpr auto num = ublas::number_equal_indexes::value; BOOST_CHECK_EQUAL( num, 3 ); } { - constexpr auto lhs = std::make_tuple(_b,_a,_d,_); - constexpr auto rhs = std::make_tuple( _,_b,_d,_); - constexpr auto num = number_equal_indexes::value; + constexpr auto lhs = std::make_tuple(i::_b,i::_a,i::_d,i::_); + constexpr auto rhs = std::make_tuple(i::_,i::_b,i::_d,i::_); + constexpr auto num = ublas::number_equal_indexes::value; BOOST_CHECK_EQUAL( num, 2 ); } { - constexpr auto lhs = std::make_tuple(_,_a,_d,_); - constexpr auto rhs = std::make_tuple(_,_b,_d,_); - constexpr auto num = number_equal_indexes::value; + constexpr auto lhs = std::make_tuple(i::_,i::_a,i::_d,i::_); + constexpr auto rhs = std::make_tuple(i::_,i::_b,i::_d,i::_); + constexpr auto num = ublas::number_equal_indexes::value; BOOST_CHECK_EQUAL( num, 1 ); } { - constexpr auto lhs = std::make_tuple(_,_a,_d,_); - constexpr auto rhs = std::make_tuple(_,_b,_d,_,_); - constexpr auto num = number_equal_indexes::value; + constexpr auto lhs = std::make_tuple(i::_,i::_a,i::_d,i::_); + constexpr auto rhs = std::make_tuple(i::_,i::_b,i::_d,i::_,i::_); + constexpr auto num = ublas::number_equal_indexes::value; BOOST_CHECK_EQUAL( num, 1 ); } } @@ -261,42 +261,42 @@ BOOST_AUTO_TEST_CASE ( test_multi_index_number_equal_indices ) BOOST_AUTO_TEST_CASE ( test_multi_index_index_position ) { - using namespace boost::numeric::ublas; - using namespace boost::numeric::ublas::index; + namespace ublas = boost::numeric::ublas; + namespace i = ublas::index; { constexpr auto tuple = std::tuple<>{}; - constexpr auto ind = index_position::value; + constexpr auto ind = ublas::index_position::value; BOOST_CHECK_EQUAL(ind,0); } { - constexpr auto tuple = std::make_tuple(_); - constexpr auto ind = index_position::value; + constexpr auto tuple = std::make_tuple(i::_); + constexpr auto ind = ublas::index_position::value; BOOST_CHECK_EQUAL(ind,0); } { - constexpr auto tuple = std::make_tuple(_); - constexpr auto ind = index_position::value; + constexpr auto tuple = std::make_tuple(i::_); + constexpr auto ind = ublas::index_position::value; BOOST_CHECK_EQUAL(ind,1); } { - constexpr auto tuple = std::make_tuple(_,_a); - constexpr auto ind = index_position::value; + constexpr auto tuple = std::make_tuple(i::_,i::_a); + constexpr auto ind = ublas::index_position::value; BOOST_CHECK_EQUAL(ind,0); } { - constexpr auto tuple = std::make_tuple(_,_a); - constexpr auto ind = index_position::value; + constexpr auto tuple = std::make_tuple(i::_,i::_a); + constexpr auto ind = ublas::index_position::value; BOOST_CHECK_EQUAL(ind,1); } { - constexpr auto tuple = std::make_tuple(_,_a); - constexpr auto ind = index_position::value; + constexpr auto tuple = std::make_tuple(i::_,i::_a); + constexpr auto ind = ublas::index_position::value; BOOST_CHECK_EQUAL(ind,2); } @@ -304,26 +304,26 @@ BOOST_AUTO_TEST_CASE ( 
test_multi_index_index_position ) { - constexpr auto tuple = std::make_tuple(_c,_,_a); - constexpr auto ind = index_position::value; + constexpr auto tuple = std::make_tuple(i::_c,i::_,i::_a); + constexpr auto ind = ublas::index_position::value; BOOST_CHECK_EQUAL(ind,0); } { - constexpr auto tuple = std::make_tuple(_c,_,_a,_); - constexpr auto ind = index_position::value; + constexpr auto tuple = std::make_tuple(i::_c,i::_,i::_a,i::_); + constexpr auto ind = ublas::index_position::value; BOOST_CHECK_EQUAL(ind,1); } { - constexpr auto tuple = std::make_tuple(_c,_,_a); - constexpr auto ind = index_position::value; + constexpr auto tuple = std::make_tuple(i::_c,i::_,i::_a); + constexpr auto ind = ublas::index_position::value; BOOST_CHECK_EQUAL(ind,2); } { - constexpr auto tuple = std::make_tuple(_c,_,_a); - constexpr auto ind = index_position::value; + constexpr auto tuple = std::make_tuple(i::_c,i::_,i::_a); + constexpr auto ind = ublas::index_position::value; BOOST_CHECK_EQUAL(ind,3); } @@ -338,71 +338,71 @@ BOOST_AUTO_TEST_CASE ( test_multi_index_index_position ) BOOST_AUTO_TEST_CASE ( test_multi_index_index_position_pairs ) { - using namespace boost::numeric::ublas; - using namespace boost::numeric::ublas::index; + namespace ublas = boost::numeric::ublas; + namespace i = ublas::index; { constexpr auto lhs = std::tuple<>{}; constexpr auto rhs = std::tuple<>{}; - auto array = index_position_pairs(lhs, rhs); + auto array = ublas::index_position_pairs(lhs, rhs); BOOST_CHECK_EQUAL(array.size(), 0ul ); } { - constexpr auto lhs = std::make_tuple(_a); + constexpr auto lhs = std::make_tuple(i::_a); constexpr auto rhs = std::tuple<>{}; - auto array = index_position_pairs(lhs, rhs); + auto array = ublas::index_position_pairs(lhs, rhs); BOOST_CHECK_EQUAL(array.size(), 0ul ); } { constexpr auto lhs = std::tuple<>{}; - constexpr auto rhs = std::make_tuple(_a); - auto array = index_position_pairs(lhs, rhs); + constexpr auto rhs = std::make_tuple(i::_a); + auto array = ublas::index_position_pairs(lhs, rhs); BOOST_CHECK_EQUAL(array.size(), 0ul ); } { - constexpr auto lhs = std::make_tuple(_b); - constexpr auto rhs = std::make_tuple(_a); - auto array = index_position_pairs(lhs, rhs); + constexpr auto lhs = std::make_tuple(i::_b); + constexpr auto rhs = std::make_tuple(i::_a); + auto array = ublas::index_position_pairs(lhs, rhs); BOOST_CHECK_EQUAL(array.size(), 0ul ); } { - constexpr auto lhs = std::make_tuple(_a); - constexpr auto rhs = std::make_tuple(_a); - auto array = index_position_pairs(lhs, rhs); - BOOST_ASSERT(array.size() == 1ul ); + constexpr auto lhs = std::make_tuple(i::_a); + constexpr auto rhs = std::make_tuple(i::_a); + auto array = ublas::index_position_pairs(lhs, rhs); + BOOST_STATIC_ASSERT(array.size() == 1ul ); BOOST_CHECK_EQUAL(array[0].first , 0 ); BOOST_CHECK_EQUAL(array[0].second, 0 ); } { - constexpr auto lhs = std::make_tuple(_a,_b); - constexpr auto rhs = std::make_tuple(_a); - auto array = index_position_pairs(lhs, rhs); - BOOST_ASSERT(array.size() == 1ul ); + constexpr auto lhs = std::make_tuple(i::_a,i::_b); + constexpr auto rhs = std::make_tuple(i::_a); + auto array = ublas::index_position_pairs(lhs, rhs); + BOOST_STATIC_ASSERT(array.size() == 1ul ); BOOST_CHECK_EQUAL(array[0].first , 0 ); BOOST_CHECK_EQUAL(array[0].second, 0 ); } { - constexpr auto lhs = std::make_tuple(_b); - constexpr auto rhs = std::make_tuple(_a,_b); - auto array = index_position_pairs(lhs, rhs); - BOOST_ASSERT(array.size() == 1ul ); + constexpr auto lhs = std::make_tuple(i::_b); + constexpr auto rhs = 
std::make_tuple(i::_a,i::_b); + auto array = ublas::index_position_pairs(lhs, rhs); + BOOST_STATIC_ASSERT(array.size() == 1ul ); BOOST_CHECK_EQUAL(array[0].first , 0 ); BOOST_CHECK_EQUAL(array[0].second, 1 ); } { - constexpr auto lhs = std::make_tuple(_a); - constexpr auto rhs = std::make_tuple(_a); - auto array = index_position_pairs(lhs, rhs); - BOOST_ASSERT(array.size() == 1ul ); + constexpr auto lhs = std::make_tuple(i::_a); + constexpr auto rhs = std::make_tuple(i::_a); + auto array = ublas::index_position_pairs(lhs, rhs); + BOOST_STATIC_ASSERT(array.size() == 1ul ); BOOST_CHECK_EQUAL(array[0].first , 0 ); BOOST_CHECK_EQUAL(array[0].second, 0 ); } @@ -410,10 +410,10 @@ BOOST_AUTO_TEST_CASE ( test_multi_index_index_position_pairs ) { - constexpr auto lhs = std::make_tuple(_a,_b); - constexpr auto rhs = std::make_tuple(_a,_b); - auto array = index_position_pairs(lhs, rhs); - BOOST_ASSERT(array.size() == 2ul ); + constexpr auto lhs = std::make_tuple(i::_a,i::_b); + constexpr auto rhs = std::make_tuple(i::_a,i::_b); + auto array = ublas::index_position_pairs(lhs, rhs); + BOOST_STATIC_ASSERT(array.size() == 2ul ); BOOST_CHECK_EQUAL(array[0].first , 0 ); BOOST_CHECK_EQUAL(array[0].second, 0 ); BOOST_CHECK_EQUAL(array[1].first , 1 ); @@ -421,10 +421,10 @@ BOOST_AUTO_TEST_CASE ( test_multi_index_index_position_pairs ) } { - constexpr auto lhs = std::make_tuple(_b,_a); - constexpr auto rhs = std::make_tuple(_a,_b); - auto array = index_position_pairs(lhs, rhs); - BOOST_ASSERT(array.size() == 2ul ); + constexpr auto lhs = std::make_tuple(i::_b,i::_a); + constexpr auto rhs = std::make_tuple(i::_a,i::_b); + auto array = ublas::index_position_pairs(lhs, rhs); + BOOST_STATIC_ASSERT(array.size() == 2ul ); BOOST_CHECK_EQUAL(array[0].first , 0 ); BOOST_CHECK_EQUAL(array[0].second, 1 ); BOOST_CHECK_EQUAL(array[1].first , 1 ); @@ -432,10 +432,10 @@ BOOST_AUTO_TEST_CASE ( test_multi_index_index_position_pairs ) } { - constexpr auto lhs = std::make_tuple(_b,_a,_c); - constexpr auto rhs = std::make_tuple(_a,_b); - auto array = index_position_pairs(lhs, rhs); - BOOST_ASSERT(array.size() == 2ul ); + constexpr auto lhs = std::make_tuple(i::_b,i::_a,i::_c); + constexpr auto rhs = std::make_tuple(i::_a,i::_b); + auto array = ublas::index_position_pairs(lhs, rhs); + BOOST_STATIC_ASSERT(array.size() == 2ul ); BOOST_CHECK_EQUAL(array[0].first , 0 ); BOOST_CHECK_EQUAL(array[0].second, 1 ); BOOST_CHECK_EQUAL(array[1].first , 1 ); @@ -443,10 +443,10 @@ BOOST_AUTO_TEST_CASE ( test_multi_index_index_position_pairs ) } { - constexpr auto lhs = std::make_tuple(_b,_a,_c); - constexpr auto rhs = std::make_tuple(_a,_b,_d); - auto array = index_position_pairs(lhs, rhs); - BOOST_ASSERT(array.size() == 2ul ); + constexpr auto lhs = std::make_tuple(i::_b,i::_a,i::_c); + constexpr auto rhs = std::make_tuple(i::_a,i::_b,i::_d); + auto array = ublas::index_position_pairs(lhs, rhs); + BOOST_STATIC_ASSERT(array.size() == 2ul ); BOOST_CHECK_EQUAL(array[0].first , 0 ); BOOST_CHECK_EQUAL(array[0].second, 1 ); BOOST_CHECK_EQUAL(array[1].first , 1 ); @@ -454,10 +454,10 @@ BOOST_AUTO_TEST_CASE ( test_multi_index_index_position_pairs ) } { - constexpr auto lhs = std::make_tuple(_b,_a,_d); - constexpr auto rhs = std::make_tuple(_a,_b,_d); - auto array = index_position_pairs(lhs, rhs); - BOOST_ASSERT(array.size() == 3ul ); + constexpr auto lhs = std::make_tuple(i::_b,i::_a,i::_d); + constexpr auto rhs = std::make_tuple(i::_a,i::_b,i::_d); + auto array = ublas::index_position_pairs(lhs, rhs); + BOOST_STATIC_ASSERT(array.size() == 3ul ); 
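// --- editor's annotation: illustrative sketch, not part of the patch ----------
// The switch from BOOST_ASSERT to BOOST_STATIC_ASSERT works because
// index_position_pairs yields a std::array-style result whose size() can be
// evaluated at compile time, so the element count needs no runtime check.
// The same idea stand-alone (needs <array>, <utility>, <boost/static_assert.hpp>):
constexpr auto example_pairs = std::array<std::pair<int, int>, 2>{{{0, 1}, {1, 0}}};
BOOST_STATIC_ASSERT(example_pairs.size() == 2ul);   // checked at compile time
// -------------------------------------------------------------------------------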
BOOST_CHECK_EQUAL(array[0].first , 0 ); BOOST_CHECK_EQUAL(array[0].second, 1 ); BOOST_CHECK_EQUAL(array[1].first , 1 ); @@ -467,10 +467,10 @@ BOOST_AUTO_TEST_CASE ( test_multi_index_index_position_pairs ) } { - constexpr auto lhs = std::make_tuple(_b,_a,_d); - constexpr auto rhs = std::make_tuple(_a,_b,_d,_); - auto array = index_position_pairs(lhs, rhs); - BOOST_ASSERT(array.size() == 3ul ); + constexpr auto lhs = std::make_tuple(i::_b,i::_a,i::_d); + constexpr auto rhs = std::make_tuple(i::_a,i::_b,i::_d,i::_); + auto array = ublas::index_position_pairs(lhs, rhs); + BOOST_STATIC_ASSERT(array.size() == 3ul ); BOOST_CHECK_EQUAL(array[0].first , 0 ); BOOST_CHECK_EQUAL(array[0].second, 1 ); BOOST_CHECK_EQUAL(array[1].first , 1 ); @@ -480,10 +480,10 @@ BOOST_AUTO_TEST_CASE ( test_multi_index_index_position_pairs ) } { - constexpr auto lhs = std::make_tuple(_b,_a,_d,_); - constexpr auto rhs = std::make_tuple(_a,_b,_d,_); - auto array = index_position_pairs(lhs, rhs); - BOOST_ASSERT(array.size() == 3ul ); + constexpr auto lhs = std::make_tuple(i::_b,i::_a,i::_d,i::_); + constexpr auto rhs = std::make_tuple(i::_a,i::_b,i::_d,i::_); + auto array = ublas::index_position_pairs(lhs, rhs); + BOOST_STATIC_ASSERT(array.size() == 3ul ); BOOST_CHECK_EQUAL(array[0].first , 0 ); BOOST_CHECK_EQUAL(array[0].second, 1 ); BOOST_CHECK_EQUAL(array[1].first , 1 ); @@ -493,10 +493,10 @@ BOOST_AUTO_TEST_CASE ( test_multi_index_index_position_pairs ) } { - constexpr auto lhs = std::make_tuple(_b,_a,_d,_); - constexpr auto rhs = std::make_tuple( _,_b,_d,_); - auto array = index_position_pairs(lhs, rhs); - BOOST_ASSERT(array.size() == 2ul ); + constexpr auto lhs = std::make_tuple(i::_b,i::_a,i::_d,i::_); + constexpr auto rhs = std::make_tuple(i::_,i::_b,i::_d,i::_); + auto array = ublas::index_position_pairs(lhs, rhs); + BOOST_STATIC_ASSERT(array.size() == 2ul ); BOOST_CHECK_EQUAL(array[0].first , 0 ); BOOST_CHECK_EQUAL(array[0].second, 1 ); BOOST_CHECK_EQUAL(array[1].first , 2 ); @@ -504,19 +504,19 @@ BOOST_AUTO_TEST_CASE ( test_multi_index_index_position_pairs ) } { - constexpr auto lhs = std::make_tuple(_,_a,_d,_); - constexpr auto rhs = std::make_tuple(_,_b,_d,_); - auto array = index_position_pairs(lhs, rhs); - BOOST_ASSERT(array.size() == 1ul ); + constexpr auto lhs = std::make_tuple(i::_,i::_a,i::_d,i::_); + constexpr auto rhs = std::make_tuple(i::_,i::_b,i::_d,i::_); + auto array = ublas::index_position_pairs(lhs, rhs); + BOOST_STATIC_ASSERT(array.size() == 1ul ); BOOST_CHECK_EQUAL(array[0].first , 2 ); BOOST_CHECK_EQUAL(array[0].second, 2 ); } { - constexpr auto lhs = std::make_tuple(_,_a,_d,_); - constexpr auto rhs = std::make_tuple(_,_b,_d,_,_); - auto array = index_position_pairs(lhs, rhs); - BOOST_ASSERT(array.size() == 1ul ); + constexpr auto lhs = std::make_tuple(i::_,i::_a,i::_d,i::_); + constexpr auto rhs = std::make_tuple(i::_,i::_b,i::_d,i::_,i::_); + auto array = ublas::index_position_pairs(lhs, rhs); + BOOST_STATIC_ASSERT(array.size() == 1ul ); BOOST_CHECK_EQUAL(array[0].first , 2 ); BOOST_CHECK_EQUAL(array[0].second, 2 ); } @@ -524,39 +524,39 @@ BOOST_AUTO_TEST_CASE ( test_multi_index_index_position_pairs ) -BOOST_AUTO_TEST_CASE ( test_multi_index_array_to_vector ) -{ - using namespace boost::numeric::ublas; - using namespace boost::numeric::ublas::index; +//BOOST_AUTO_TEST_CASE ( test_multi_index_array_to_vector ) +//{ +// namespace ublas = boost::numeric::ublas; +// namespace i = ublas::index; - auto check = [](auto const& lhs, auto const& rhs) - { - auto array = index_position_pairs(lhs, rhs); 
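// --- editor's annotation: not part of the patch --------------------------------
// The array_to_vector test below is disabled by this patch. Judging from its old
// assertions, it verified that array_to_vector turns the zero-based (first,
// second) position pairs produced by index_position_pairs into two vectors of
// one-based positions. A hypothetical check in that style (names as in the old
// test; whether array_to_vector is still available after this refactor is not
// established by the patch itself):
//   auto array       = ublas::index_position_pairs(lhs, rhs);
//   auto vector_pair = ublas::array_to_vector(array);
//   assert(vector_pair.first [0] == array[0].first  + 1);
//   assert(vector_pair.second[0] == array[0].second + 1);
// --------------------------------------------------------------------------------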
+// auto check = [](auto const& lhs, auto const& rhs) +// { +// auto array = ublas::index_position_pairs(lhs, rhs); - auto vector_pair = array_to_vector( array ); +// auto vector_pair = ublas::array_to_vector( array ); - BOOST_CHECK_EQUAL(vector_pair.first .size(), array.size() ); - BOOST_CHECK_EQUAL(vector_pair.second.size(), array.size() ); +// BOOST_CHECK_EQUAL(vector_pair.first .size(), array.size() ); +// BOOST_CHECK_EQUAL(vector_pair.second.size(), array.size() ); - for(auto i = 0ul; i < array.size(); ++i) - { - BOOST_CHECK_EQUAL(vector_pair.first [i], array[i].first +1 ); - BOOST_CHECK_EQUAL(vector_pair.second[i], array[i].second+1 ); - } +// for(auto i = 0ul; i < array.size(); ++i) +// { +// BOOST_CHECK_EQUAL(vector_pair.first [i], array[i].first +1 ); +// BOOST_CHECK_EQUAL(vector_pair.second[i], array[i].second+1 ); +// } - }; +// }; - check(std::tuple<>{} , std::tuple<>{}); - check(std::make_tuple(_a) , std::tuple<>{}); - check(std::tuple<>{} , std::make_tuple(_a)); - check(std::make_tuple(_a) , std::make_tuple(_b)); - check(std::make_tuple(_a) , std::make_tuple(_a)); - check(std::make_tuple(_a,_b), std::make_tuple(_a)); - check(std::make_tuple(_a) , std::make_tuple(_a,_b)); - check(std::make_tuple(_a,_b), std::make_tuple(_a,_b)); - check(std::make_tuple(_b,_a), std::make_tuple(_a,_b)); - check(std::make_tuple(_b,_a,_c), std::make_tuple(_a,_b,_d)); -} +// check(std::tuple<>{} , std::tuple<>{}); +// check(std::make_tuple(i::_a) , std::tuple<>{}); +// check(std::tuple<>{} , std::make_tuple(i::_a)); +// check(std::make_tuple(i::_a) , std::make_tuple(i::_b)); +// check(std::make_tuple(i::_a) , std::make_tuple(i::_a)); +// check(std::make_tuple(i::_a,i::_b), std::make_tuple(i::_a)); +// check(std::make_tuple(i::_a) , std::make_tuple(i::_a,i::_b)); +// check(std::make_tuple(i::_a,i::_b), std::make_tuple(i::_a,i::_b)); +// check(std::make_tuple(i::_b,i::_a), std::make_tuple(i::_a,i::_b)); +// check(std::make_tuple(i::_b,i::_a,i::_c), std::make_tuple(i::_a,i::_b,i::_d)); +//} diff --git a/test/tensor/test_multiplication.cpp b/test/tensor/test_multiplication.cpp index 19affdeb8..c5ed51e5f 100644 --- a/test/tensor/test_multiplication.cpp +++ b/test/tensor/test_multiplication.cpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at @@ -16,10 +16,7 @@ #include #include -#include -#include -#include -#include +#include #include "utility.hpp" #include @@ -35,386 +32,384 @@ using test_types = zip>::with_t; - fixture() - : extents { - extents_type{1,1}, // 1 - extents_type{1,2}, // 2 - extents_type{2,1}, // 3 - extents_type{2,3}, // 4 - extents_type{5,4}, // 5 - extents_type{2,3,1}, // 6 - extents_type{4,1,3}, // 7 - extents_type{1,2,3}, // 8 - extents_type{4,2,3}, // 9 - extents_type{4,2,3,5}} // 10 - { - } - std::vector extents; + using extents_t = boost::numeric::ublas::extents<>; + const std::vector extents = + { + extents_t{1,1}, // 1 + extents_t{1,2}, // 2 + extents_t{2,1}, // 3 + extents_t{2,3}, // 4 + extents_t{5,4}, // 5 + extents_t{2,3,1}, // 6 + extents_t{4,1,3}, // 7 + extents_t{1,2,3}, // 8 + extents_t{4,2,3}, // 9 + extents_t{4,2,3,5} // 10 + }; }; BOOST_FIXTURE_TEST_CASE_TEMPLATE(test_tensor_mtv, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using vector_type = std::vector; - using extents_type = ublas::extents<>; - using strides_type = ublas::strides_t; - using extents_type_base = typename extents_type::base_type; - using size_type = typename extents_type_base::value_type; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + using vector_t = std::vector; + using extents_t = ublas::extents<>; + using extents_base_t = typename extents_t::base_type; - for(auto const& na : extents) { + for(auto const& na : extents) { - if(na.size() > 2) - continue; + if(ublas::size(na) > 2) + continue; - auto a = vector_type(product(na), value_type{2}); - auto wa = strides_type(na); - for(auto m = 0u; m < na.size(); ++m){ - auto nb = extents_type {na[m],1}; - auto wb = strides_type (nb); - auto b = vector_type (product(nb), value_type{1} ); + auto a = vector_t(ublas::product(na), value_t{2}); + auto wa = ublas::to_strides(na,layout_t{}); + for(auto m = std::size_t{0}; m < ublas::size(na); ++m){ + auto nb = extents_t {na[m],std::size_t{1}}; + auto wb = ublas::to_strides(nb,layout_t{}); + auto b = vector_t (ublas::product(nb), value_t{1} ); - auto nc_base = extents_type_base(std::max(na.size()-1, size_type{2}), 1); + auto nc_base = extents_base_t(std::max(std::size_t{ublas::size(na)-1u}, std::size_t{2}), 1); - for(auto i = 0u, j = 0u; i < na.size(); ++i) - if(i != m) - nc_base[j++] = na[i]; + for(auto i = 0ul, j = 0ul; i < ublas::size(na); ++i) + if(i != m) + nc_base[j++] = na[i]; - auto nc = extents_type (nc_base); - auto wc = strides_type (nc); - auto c = vector_type (product(nc), value_type{0}); + auto nc = extents_t (nc_base); + auto wc = ublas::to_strides(nc,layout_t{}); + auto c = vector_t (ublas::product(nc), value_t{0}); - ublas::detail::recursive::mtv( - size_type(m), - c.data(), nc.data(), wc.data(), - a.data(), na.data(), wa.data(), - b.data()); + ublas::detail::recursive::mtv( + m, + c.data(), nc.data(), wc.data(), + a.data(), na.data(), wa.data(), + b.data()); + auto v = value_t(na[m]); + BOOST_CHECK(std::equal(c.begin(),c.end(),a.begin(), [v](auto cc, auto aa){return cc == v*aa;})); - for(auto i = 0u; i < c.size(); ++i) - BOOST_CHECK_EQUAL( c[i] , value_type( static_cast< inner_type_t >(na[m]) ) * a[i] ); +// for(auto i = 0u; i < c.size(); ++i) +// BOOST_CHECK_EQUAL( c[i] , value_t( static_cast< inner_type_t >(na[m]) ) * a[i] ); - } } + } } 
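The mtv check above relies on a simple identity: contracting a tensor filled with a constant value against a vector of ones along mode m scales every remaining entry by the contracted extent na[m], which is why the new assertion compares against v = value_t(na[m]) times the corresponding entry of a. A minimal standalone sketch of that arithmetic for the rank-2 case (plain C++17, independent of the uBLAS API; the extents, variable names and constant 2.0 are illustrative only, not taken from the patch):

// Why the tests expect c[i] == na[m] * a[i] when a is constant and b is all ones:
// the mode-m contraction sums na[m] equal terms.
#include <cassert>
#include <cstddef>
#include <vector>

int main()
{
    const std::size_t rows = 4, cols = 3;      // extents na = {4,3}, contract mode m = 1 (cols)
    const double aval = 2.0;                   // constant value of the tensor a

    std::vector<double> a(rows * cols, aval);  // constant tensor, row-major storage
    std::vector<double> b(cols, 1.0);          // vector of ones with length na[m]
    std::vector<double> c(rows, 0.0);          // result keeps the remaining extent

    for (std::size_t i = 0; i < rows; ++i)
        for (std::size_t j = 0; j < cols; ++j)
            c[i] += a[i * cols + j] * b[j];    // contract over mode m

    for (double ci : c)
        assert(ci == static_cast<double>(cols) * aval);  // c[i] == na[m] * a[i]
    return 0;
}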
BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_mtm, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using vector_type = std::vector; - using extents_type = ublas::extents<>; - using strides_type = ublas::strides_t; - // using extents_type_base = typename extents_type::base_type; - + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + using vector_t = std::vector; + using extents_t = ublas::extents<>; - for(auto const& na : extents) { + for(auto const& na : extents) { - if(na.size() != 2) - continue; + if(ublas::size(na) != 2) + continue; - auto a = vector_type (product(na), value_type{2}); - auto wa = strides_type (na); + auto a = vector_t (ublas::product(na), value_t{2}); + auto wa = ublas::to_strides(na,layout_t{}); - auto nb = extents_type {na[1],na[0]}; - auto wb = strides_type (nb); - auto b = vector_type (product(nb), value_type{1} ); + auto nb = extents_t {na[1],na[0]}; + auto wb = ublas::to_strides(nb,layout_t{}); + auto b = vector_t (ublas::product(nb), value_t{1} ); - auto nc = extents_type {na[0],nb[1]}; -auto wc = strides_type (nc); -auto c = vector_type (product(nc)); + auto nc = extents_t {na[0],nb[1]}; + auto wc = ublas::to_strides(nc,layout_t{}); + auto c = vector_t (ublas::product(nc)); -ublas::detail::recursive::mtm( - c.data(), nc.data(), wc.data(), - a.data(), na.data(), wa.data(), - b.data(), nb.data(), wb.data()); + ublas::detail::recursive::mtm( + c.data(), nc.data(), wc.data(), + a.data(), na.data(), wa.data(), + b.data(), nb.data(), wb.data()); + auto v = value_t(na[1])*a[0]; + BOOST_CHECK(std::all_of(c.begin(),c.end(), [v](auto cc){return cc == v;})); -for(auto i = 0u; i < c.size(); ++i) -BOOST_CHECK_EQUAL( c[i] , value_type( static_cast< inner_type_t >(na[1]) ) * a[0] ); +// for(auto i = 0u; i < c.size(); ++i) +// BOOST_CHECK_EQUAL( c[i] , value_t( static_cast< inner_type_t >(na[1]) ) * a[0] ); -} + } } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ttv, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using vector_type = std::vector; - using extents_type = ublas::extents<>; - using strides_type = ublas::strides_t; - using extents_type_base = typename extents_type::base_type; - using size_type = typename extents_type_base::value_type; - + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + using vector_t = std::vector; + using extents_t = ublas::extents<>; + using extents_base_t = typename extents_t::base_type; - for(auto const& na : extents) { + for(auto const& na : extents) { - auto a = vector_type(product(na), value_type{2}); - auto wa = strides_type(na); - for(auto m = 0u; m < na.size(); ++m){ - auto b = vector_type (na[m], value_type{1} ); - auto nb = extents_type {na[m],1}; - auto wb = strides_type (nb); + auto a = vector_t(ublas::product(na), value_t{2}); + auto wa = ublas::to_strides(na,layout_t{}); + for(auto m = std::size_t{0}; m < ublas::size(na); ++m){ + auto b = vector_t (na[m], value_t{1} ); + auto nb = extents_t {na[m],1}; + auto wb = ublas::to_strides(nb,layout_t{}); - auto nc_base = extents_type_base(std::max(na.size()-1, size_type(2)),1); + auto nc_base = extents_base_t(std::max(std::size_t{ublas::size(na)-1u}, std::size_t{2}),1); - for(auto i = 
0ul, j = 0ul; i < na.size(); ++i) - if(i != m) - nc_base[j++] = na[i]; + for(auto i = 0ul, j = 0ul; i < ublas::size(na); ++i) + if(i != m) + nc_base[j++] = na[i]; - auto nc = extents_type (nc_base); - auto wc = strides_type (nc); - auto c = vector_type (product(nc), value_type{0}); + auto nc = extents_t (nc_base); + auto wc = ublas::to_strides(nc,layout_t{}); + auto c = vector_t (ublas::product(nc), value_t{0}); - ublas::ttv(size_type(m+1), na.size(), - c.data(), nc.data(), wc.data(), - a.data(), na.data(), wa.data(), - b.data(), nb.data(), wb.data()); + ublas::ttv(m+1, ublas::size(na), + c.data(), nc.data(), wc.data(), + a.data(), na.data(), wa.data(), + b.data(), nb.data(), wb.data()); + auto v = value_t(na[m]); + BOOST_CHECK(std::equal(c.begin(),c.end(),a.begin(), [v](auto cc, auto aa){return cc == v*aa;})); - for(auto i = 0u; i < c.size(); ++i) - BOOST_CHECK_EQUAL( c[i] , value_type(na[m]) * a[i] ); +// for(auto i = 0u; i < c.size(); ++i) +// BOOST_CHECK_EQUAL( c[i] , value_t(na[m]) * a[i] ); - } } + } } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ttm, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using vector_type = std::vector; - using extents_type = ublas::extents<>; - using strides_type = ublas::strides_t; - using size_type = typename extents_type::value_type; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + using vector_t = std::vector; + using extents_t = ublas::extents<>; + + + for(auto const& na : extents) { + + auto a = vector_t(ublas::product(na), value_t{2}); + auto wa = ublas::to_strides(na,layout_t{}); + for(auto m = std::size_t{0}; m < ublas::size(na); ++m){ + const auto nb = extents_t {na[m], na[m] }; + const auto b = vector_t (ublas::product(nb), value_t{1} ); + const auto wb = ublas::to_strides(nb,layout_t{}); - for(auto const& na : extents) { + const auto& nc = na; + const auto wc = ublas::to_strides(nc,layout_t{}); + auto c = vector_t (ublas::product(nc), value_t{0}); - auto a = vector_type(product(na), value_type{2}); - auto wa = strides_type(na); - for(auto m = 0u; m < na.size(); ++m){ - auto nb = extents_type {na[m], na[m] }; - auto b = vector_type (product(nb), value_type{1} ); - auto wb = strides_type (nb); + ublas::ttm(m+1, ublas::size(na), + c.data(), nc.data(), wc.data(), + a.data(), na.data(), wa.data(), + b.data(), nb.data(), wb.data()); - auto nc = na; - auto wc = strides_type (nc); - auto c = vector_type (product(nc), value_type{0}); + auto v = value_t(na[m]); + BOOST_CHECK(std::equal(c.begin(),c.end(),a.begin(), [v](auto cc, auto aa){return cc == v*aa;})); - ublas::ttm(size_type(m+1), na.size(), - c.data(), nc.data(), wc.data(), - a.data(), na.data(), wa.data(), - b.data(), nb.data(), wb.data()); - for(auto i = 0u; i < c.size(); ++i) - BOOST_CHECK_EQUAL( c[i] , value_type( static_cast< inner_type_t >(na[m]) ) * a[i] ); +// for(auto i = 0u; i < c.size(); ++i) +// BOOST_CHECK_EQUAL( c[i] , value_t( static_cast< inner_type_t >(na[m]) ) * a[i] ); - } } + } } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ttt_permutation, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using vector_type = std::vector; - using extents_type = ublas::extents<>; - using strides_type = ublas::strides_t; - using size_type = typename 
strides_type::value_type; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + using vector_t = std::vector; + using extents_t = ublas::extents<>; + using extents_base_t = typename extents_t::base_type; - auto compute_factorial = [](auto const& p){ - auto f = 1ul; - for(auto i = 1u; i <= p; ++i) - f *= i; - return f; - }; + auto compute_factorial = [](auto const& p){ + auto f = 1ul; + for(auto i = 1u; i <= p; ++i) + f *= i; + return f; + }; - auto compute_inverse_permutation = [](auto const& pi){ - auto pi_inv = pi; - for(auto j = 0u; j < pi.size(); ++j) - pi_inv[pi[j]-1] = j+1; - return pi_inv; - }; + auto compute_inverse_permutation = [](auto const& pi){ + auto pi_inv = pi; + for(auto j = 0u; j < pi.size(); ++j) + pi_inv[pi[j]-1] = j+1; + return pi_inv; + }; - auto permute_extents = [](auto const& pi, auto const& na){ - auto nb = na; - assert(pi.size() == na.size()); - for(auto j = 0u; j < pi.size(); ++j) - nb[j] = na[pi[j]-1]; - return nb; - }; + auto permute_extents = [](auto const& pi, auto const& na){ + auto nb_base = na.base(); + assert(pi.size() == ublas::size(na)); + for(auto j = 0u; j < pi.size(); ++j) + nb_base[j] = na[pi[j]-1]; + return extents_t(nb_base); + }; - // left-hand and right-hand side have the - // the same number of elements + // left-hand and right-hand side have the + // the same number of elements - // computing the inner product with - // different permutation tuples for - // right-hand side + // computing the inner product with + // different permutation tuples for + // right-hand side - for(auto const& na : extents) { + for(auto const& na : extents) { - auto wa = strides_type(na); - auto a = vector_type(product(na), value_type{2}); - auto pa = na.size(); - auto pia = std::vector(pa); - std::iota( pia.begin(), pia.end(), 1 ); + auto wa = ublas::to_strides(na,layout_t{}); + auto a = vector_t(ublas::product(na), value_t{2}); + auto pa = ublas::size(na); + auto pia = std::vector(pa); + std::iota( pia.begin(), pia.end(), std::size_t{1} ); - auto pib = pia; - auto pib_inv = compute_inverse_permutation(pib); + auto pib = pia; + auto pib_inv = compute_inverse_permutation(pib); - auto f = compute_factorial(pa); + auto f = compute_factorial(pa); - // for the number of possible permutations - // only permutation tuple pib is changed. - for(auto i = 0u; i < f; ++i) { + // for the number of possible permutations + // only permutation tuple pib is changed. + for(auto i = 0u; i < f; ++i) { - auto nb = permute_extents( pib, na ); - auto wb = strides_type(nb); - auto b = vector_type(product(nb), value_type{3}); - auto pb = nb.size(); + auto nb = permute_extents( pib, na ); + auto wb = ublas::to_strides(nb,layout_t{}); + auto b = vector_t(ublas::product(nb), value_t{3}); + auto pb = ublas::size(nb); - // the number of contractions is changed. - for( auto q = size_type(0); q <= pa; ++q) { + // the number of contractions is changed. + for(auto q = std::size_t{0}; q <= pa; ++q) { - auto r = pa - q; - auto s = pb - q; + auto r = pa - q; + auto s = pb - q; - auto pc = r+s > 0 ? std::max(r+s,size_type(2)) : size_type(2); + auto pc = r+s > 0 ? 
std::max(std::size_t{r+s},std::size_t{2}) : std::size_t{2}; - auto nc_base = std::vector( pc , 1 ); + auto nc_base = extents_base_t(pc,std::size_t{1}); - for(auto j = 0u; j < r; ++j) - nc_base[ j ] = na[ pia[j]-1 ]; + for(auto j = 0u; j < r; ++j) + nc_base[j] = na[pia[j]-1]; - for(auto j = 0u; j < s; ++j) - nc_base[ r + j ] = nb[ pib_inv[j]-1 ]; + for(auto j = 0u; j < s; ++j) + nc_base[r+j] = nb[ pib_inv[j]-1 ]; - auto nc = extents_type ( nc_base ); - auto wc = strides_type ( nc ); - auto c = vector_type ( product(nc), value_type(0) ); + auto nc = extents_t ( nc_base ); + auto wc = ublas::to_strides(nc,layout_t{}); + auto c = vector_t ( ublas::product(nc), value_t(0) ); - ublas::ttt(pa,pb,q, - pia.data(), pib_inv.data(), - c.data(), nc.data(), wc.data(), - a.data(), na.data(), wa.data(), - b.data(), nb.data(), wb.data()); + ublas::ttt(pa,pb,q, + pia.data(), pib_inv.data(), + c.data(), nc.data(), wc.data(), + a.data(), na.data(), wa.data(), + b.data(), nb.data(), wb.data()); - auto acc = value_type(1); - for(auto j = r; j < pa; ++j) - acc *= value_type( static_cast< inner_type_t >(na[pia[j]-1]) ); + auto acc = std::size_t{1}; + for(auto j = r; j < pa; ++j) + acc *= na[pia[j]-1]; - for(auto j = 0ul; j < c.size(); ++j) - BOOST_CHECK_EQUAL( c[j] , acc * a[0] * b[0] ); + auto v = value_t(acc)*a[0]*b[0]; - } + BOOST_CHECK( std::all_of(c.begin(),c.end(), [v](auto cc){return cc == v; } ) ); - std::next_permutation(pib.begin(), pib.end()); - pib_inv = compute_inverse_permutation(pib); - } + } + + std::next_permutation(pib.begin(), pib.end()); + pib_inv = compute_inverse_permutation(pib); } + } } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ttt, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using vector_type = std::vector; - using extents_type = ublas::extents<>; - using strides_type = ublas::strides_t; - using size_type = typename strides_type::value_type; - - // left-hand and right-hand side have the - // the same number of elements + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + using vector_t = std::vector; + using extents_t = ublas::extents<>; + using extents_base_t = typename extents_t::base_type; - // computing the inner product with - // different permutation tuples for - // right-hand side + // left-hand and right-hand side have the + // the same number of elements - for(auto const& na : extents) { + // computing the inner product with + // different permutation tuples for + // right-hand side - auto wa = strides_type(na); - auto a = vector_type(product(na), value_type{2}); - auto pa = na.size(); + for(auto const& na : extents) { - auto nb = na; - auto wb = strides_type(nb); - auto b = vector_type(product(nb), value_type{3}); - auto pb = nb.size(); + auto wa = ublas::to_strides(na,layout_t{}); + auto a = vector_t(ublas::product(na), value_t{2}); + auto pa = ublas::size(na); - // std::cout << "na = "; - // std::copy(na.begin(), na.end(), std::ostream_iterator(std::cout, " ")); - // std::cout << std::endl; + auto const& nb = na; + auto wb = ublas::to_strides(nb,layout_t{}); + auto b = vector_t(ublas::product(nb), value_t{3}); + auto pb = ublas::size(nb); - // std::cout << "nb = "; - // std::copy(nb.begin(), nb.end(), std::ostream_iterator(std::cout, " ")); - // std::cout << std::endl; + // std::cout << "na = "; + // std::copy(na.begin(), na.end(), std::ostream_iterator(std::cout, " 
")); + // std::cout << std::endl; + // std::cout << "nb = "; + // std::copy(nb.begin(), nb.end(), std::ostream_iterator(std::cout, " ")); + // std::cout << std::endl; - // the number of contractions is changed. - for( auto q = size_type(0); q <= pa; ++q) { // pa - auto r = pa - q; - auto s = pb - q; + // the number of contractions is changed. + for( auto q = std::size_t{0}; q <= pa; ++q) { // pa - auto pc = r+s > 0 ? std::max(r+s, size_type(2)) : size_type(2); + auto r = pa - q; + auto s = pb - q; - auto nc_base = std::vector( pc , 1 ); + auto pc = r+s > 0 ? std::max(std::size_t{r+s},std::size_t{2}) : std::size_t{2}; - for(auto i = 0u; i < r; ++i) - nc_base[ i ] = na[ i ]; + auto nc_base = extents_base_t(pc,std::size_t{1}); - for(auto i = 0u; i < s; ++i) - nc_base[ r + i ] = nb[ i ]; + for(auto i = 0u; i < r; ++i) + nc_base[i] = na[i]; - auto nc = extents_type ( nc_base ); - auto wc = strides_type ( nc ); - auto c = vector_type ( product(nc), value_type{0} ); + for(auto i = 0u; i < s; ++i) + nc_base[r+i] = nb[i]; - // std::cout << "nc = "; - // std::copy(nc.begin(), nc.end(), std::ostream_iterator(std::cout, " ")); - // std::cout << std::endl; + auto nc = extents_t ( nc_base ); + auto wc = ublas::to_strides(nc,layout_t{}); + auto c = vector_t ( ublas::product(nc), value_t{0} ); - ublas::ttt(pa,pb,q, - c.data(), nc.data(), wc.data(), - a.data(), na.data(), wa.data(), - b.data(), nb.data(), wb.data()); + // std::cout << "nc = "; + // std::copy(nc.begin(), nc.end(), std::ostream_iterator(std::cout, " ")); + // std::cout << std::endl; + ublas::ttt(pa,pb,q, + c.data(), nc.data(), wc.data(), + a.data(), na.data(), wa.data(), + b.data(), nb.data(), wb.data()); - auto acc = value_type(1); - for(auto i = r; i < pa; ++i) - acc *= value_type( static_cast< inner_type_t >(na[i]) ); - for(auto i = 0u; i < c.size(); ++i) - BOOST_CHECK_EQUAL( c[i] , acc * a[0] * b[0] ); + auto acc = std::size_t{1}; + for(auto i = r; i < pa; ++i) + acc *= na[i]; - } + auto v = value_t(acc)*a[0]*b[0]; + BOOST_CHECK( std::all_of(c.begin(),c.end(), [v](auto cc){return cc == v; } ) ); } + + } } @@ -423,70 +418,68 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ttt, value, test_types, fixture ) BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_inner, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using strides_type = ublas::strides_t,layout_type>; - using vector_type = std::vector; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + using vector_t = std::vector; + for(auto const& n : extents) { - for(auto const& n : extents) { + auto a = vector_t(ublas::product(n), value_t{2}); + auto b = vector_t(ublas::product(n), value_t{3}); + auto w = ublas::to_strides(n,layout_t{}); - auto a = vector_type(product(n), value_type{2}); - auto b = vector_type(product(n), value_type{3}); - auto w = strides_type(n); + auto c = ublas::inner(ublas::size(n), n.data(), a.data(), w.data(), b.data(), w.data(), value_t(0)); + auto cref = std::inner_product(a.begin(), a.end(), b.begin(), value_t(0)); - auto c = ublas::inner(n.size(), n.data(), a.data(), w.data(), b.data(), w.data(), value_type(0)); - auto cref = std::inner_product(a.begin(), a.end(), b.begin(), value_type(0)); + BOOST_CHECK_EQUAL( c , cref ); - BOOST_CHECK_EQUAL( c , cref ); - - } + } } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_outer, value, test_types, fixture ) { - using 
namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using extents_type = ublas::extents<>; - using strides_type = ublas::strides_t; - using vector_type = std::vector; - + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + using extents_t = ublas::extents<>; + using vector_t = std::vector; - for(auto const& na : extents) { - auto a = vector_type(product(na), value_type{2}); - auto wa = strides_type(na); + for(auto const& na : extents) { - for(auto const& nb : extents) { + auto a = vector_t(ublas::product(na), value_t{2}); + auto wa = ublas::to_strides(na,layout_t{}); - auto b = vector_type(product(nb), value_type{3}); - auto wb = strides_type(nb); + for(auto const& nb : extents) { - auto c = vector_type(product(nb)*product(na)); - auto nc = typename extents_type::base_type(na.size()+nb.size()); + auto b = vector_t(ublas::product(nb), value_t{3}); + auto wb = ublas::to_strides(nb,layout_t{}); - for(auto i = 0u; i < na.size(); ++i) - nc[i] = na[i]; - for(auto i = 0u; i < nb.size(); ++i) - nc[i+na.size()] = nb[i]; + auto c = vector_t(ublas::product(nb)*ublas::product(na)); + auto nc_base = typename extents_t::base_type(ublas::size(na)+ublas::size(nb)); - auto wc = strides_type(extents_type(nc)); + for(auto i = 0u; i < ublas::size(na); ++i) + nc_base[i] = na[i]; + for(auto i = 0u; i < ublas::size(nb); ++i) + nc_base[i+ublas::size(na)] = nb[i]; - ublas::outer(c.data(), nc.size(), nc.data(), wc.data(), - a.data(), na.size(), na.data(), wa.data(), - b.data(), nb.size(), nb.data(), wb.data()); + auto nc = extents_t(nc_base); + auto wc = ublas::to_strides(nc,layout_t{}); - for(auto const& cc : c) - BOOST_CHECK_EQUAL( cc , a[0]*b[0] ); - } + ublas::outer(c.data(), ublas::size(nc), nc.data(), wc.data(), + a.data(), ublas::size(na), na.data(), wa.data(), + b.data(), ublas::size(nb), nb.data(), wb.data()); + for(auto const& cc : c) + BOOST_CHECK_EQUAL( cc , a[0]*b[0] ); } + } + } diff --git a/test/tensor/test_operators_arithmetic.cpp b/test/tensor/test_operators_arithmetic.cpp index 723fffe11..98484ccf7 100644 --- a/test/tensor/test_operators_arithmetic.cpp +++ b/test/tensor/test_operators_arithmetic.cpp @@ -17,7 +17,7 @@ #include #include "utility.hpp" -BOOST_AUTO_TEST_SUITE(test_tensor_arithmetic_operations, * boost::unit_test::depends_on("test_tensor")) +BOOST_AUTO_TEST_SUITE(test_tensor_arithmetic_operations/*, * boost::unit_test::depends_on("test_tensor")*/) using double_extended = boost::multiprecision::cpp_bin_float_double_extended; @@ -25,22 +25,21 @@ using test_types = zip::with_t; - fixture() - : extents{ - extents_type{}, // 0 - extents_type{1,1}, // 1 - extents_type{1,2}, // 2 - extents_type{2,1}, // 3 - extents_type{2,3}, // 4 - extents_type{2,3,1}, // 5 - extents_type{4,1,3}, // 6 - extents_type{1,2,3}, // 7 - extents_type{4,2,3}, // 8 - extents_type{4,2,3,5}} // 9 + using extents_type = boost::numeric::ublas::extents<>; + + std::vector extents = { - } - std::vector extents; +// extents_type{}, // 0 + extents_type{1,1}, // 1 + extents_type{1,2}, // 2 + extents_type{2,1}, // 3 + extents_type{2,3}, // 4 + extents_type{2,3,1}, // 5 + extents_type{4,1,3}, // 6 + extents_type{1,2,3}, // 7 + extents_type{4,2,3}, // 8 + extents_type{4,2,3,5} // 9 + }; }; @@ -48,10 +47,10 @@ struct fixture BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_binary_arithmetic_operations, value, test_types, fixture) { - using namespace boost::numeric; + namespace 
ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; auto check = [](auto const& e) @@ -86,11 +85,6 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_binary_arithmetic_operations, valu BOOST_CHECK_EQUAL ( r(i), 4 ); - r = tensor_type (e,1) + tensor_type (e,1); - - for(auto i = 0ul; i < r.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), 2 ); - r = t * t * t * t2; for(auto i = 0ul; i < t.size(); ++i) @@ -117,10 +111,10 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_binary_arithmetic_operations, valu BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_unary_arithmetic_operations, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; auto check = [](auto const& e) @@ -180,10 +174,10 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_unary_arithmetic_operations, value BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_assign_arithmetic_operations, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; auto check = [](auto const& e) diff --git a/test/tensor/test_operators_comparison.cpp b/test/tensor/test_operators_comparison.cpp index 6e4932f41..b6aeb191a 100644 --- a/test/tensor/test_operators_comparison.cpp +++ b/test/tensor/test_operators_comparison.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018-2019 Cem Bassoy +// Copyright (c) 2018 Cem Bassoy // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at @@ -19,155 +19,155 @@ #include "utility.hpp" -BOOST_AUTO_TEST_SUITE(test_tensor_comparison, * boost::unit_test::depends_on("test_tensor")) +BOOST_AUTO_TEST_SUITE(test_tensor_comparison/*, * boost::unit_test::depends_on("test_tensor")*/) using double_extended = boost::multiprecision::cpp_bin_float_double_extended; using test_types = zip::with_t; struct fixture { - using extents_type = boost::numeric::ublas::extents<>; - fixture() - : extents{ - extents_type{}, // 0 - extents_type{1,1}, // 1 - extents_type{1,2}, // 2 - extents_type{2,1}, // 3 - extents_type{2,3}, // 4 - extents_type{2,3,1}, // 5 - extents_type{4,1,3}, // 6 - extents_type{1,2,3}, // 7 - extents_type{4,2,3}, // 8 - extents_type{4,2,3,5}} // 9 - { - } - std::vector extents; + using extents_type = boost::numeric::ublas::extents<>; + fixture() + : extents{ + // extents_type{}, // 0 + extents_type{1,1}, // 1 + extents_type{1,2}, // 2 + extents_type{2,1}, // 3 + extents_type{2,3}, // 4 + extents_type{2,3,1}, // 5 + extents_type{4,1,3}, // 6 + extents_type{1,2,3}, // 7 + extents_type{4,2,3}, // 8 + extents_type{4,2,3,5}} // 9 + { + } + std::vector extents; }; BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - - - auto check = [](auto const& e) - { - auto t = tensor_type (e); - auto t2 = tensor_type (e); - auto v = value_type {}; - - std::iota(t.begin(), t.end(), v); - std::iota(t2.begin(), t2.end(), v+2); - - BOOST_CHECK( t == t ); - BOOST_CHECK( t != t2 ); - - if(t.empty()) - return; - - BOOST_CHECK(!(t < t)); - BOOST_CHECK(!(t > t)); - BOOST_CHECK( t < t2 ); - BOOST_CHECK( t2 > t ); - BOOST_CHECK( t <= t ); - BOOST_CHECK( t >= t ); - BOOST_CHECK( t <= t2 ); - BOOST_CHECK( t2 >= t ); - BOOST_CHECK( t2 >= t2 ); - BOOST_CHECK( t2 >= t ); - }; - - for(auto const& e : extents) - check(e); - - auto e0 = extents.at(0); - auto e1 = extents.at(1); - auto e2 = extents.at(2); - - - auto b = false; - BOOST_CHECK_NO_THROW ( b = (tensor_type(e0) == tensor_type(e0))); - BOOST_CHECK_NO_THROW ( b = (tensor_type(e1) == tensor_type(e2))); - BOOST_CHECK_NO_THROW ( b = (tensor_type(e0) == tensor_type(e2))); - BOOST_CHECK_NO_THROW ( b = (tensor_type(e1) != tensor_type(e2))); - - BOOST_CHECK_THROW ( b = (tensor_type(e1) >= tensor_type(e2)), std::runtime_error ); - BOOST_CHECK_THROW ( b = (tensor_type(e1) <= tensor_type(e2)), std::runtime_error ); - BOOST_CHECK_THROW ( b = (tensor_type(e1) < tensor_type(e2)), std::runtime_error ); - BOOST_CHECK_THROW ( b = (tensor_type(e1) > tensor_type(e2)), std::runtime_error ); + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ublas::tensor_dynamic; + + + auto check = [](auto const& e) + { + auto t = tensor_type (e); + auto t2 = tensor_type (e); + auto v = value_type {}; + + std::iota(t.begin(), t.end(), v); + std::iota(t2.begin(), t2.end(), v+2); + + BOOST_CHECK( t == t ); + BOOST_CHECK( t != t2 ); + + if(t.empty()) + return; + + BOOST_CHECK(!(t < t)); + BOOST_CHECK(!(t > t)); + BOOST_CHECK( t < t2 ); + BOOST_CHECK( t2 > t ); + BOOST_CHECK( t <= t ); + BOOST_CHECK( t >= t ); + BOOST_CHECK( t <= t2 ); + BOOST_CHECK( t2 >= t ); + BOOST_CHECK( t2 >= t2 ); + BOOST_CHECK( t2 >= t ); + }; + + for(auto const& e : extents) + check(e); + + auto e0 
= extents.at(0); + auto e1 = extents.at(1); + auto e2 = extents.at(2); + + + auto b = false; + BOOST_CHECK_NO_THROW ( b = (tensor_type(e0) == tensor_type(e0))); + BOOST_CHECK_NO_THROW ( b = (tensor_type(e1) == tensor_type(e2))); + BOOST_CHECK_NO_THROW ( b = (tensor_type(e0) == tensor_type(e2))); + BOOST_CHECK_NO_THROW ( b = (tensor_type(e1) != tensor_type(e2))); + + BOOST_CHECK_THROW ( b = (tensor_type(e1) >= tensor_type(e2)), std::runtime_error ); + BOOST_CHECK_THROW ( b = (tensor_type(e1) <= tensor_type(e2)), std::runtime_error ); + BOOST_CHECK_THROW ( b = (tensor_type(e1) < tensor_type(e2)), std::runtime_error ); + BOOST_CHECK_THROW ( b = (tensor_type(e1) > tensor_type(e2)), std::runtime_error ); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison_with_tensor_expressions, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - - - auto check = [](auto const& e) - { - auto t = tensor_type (e); - auto t2 = tensor_type (e); - auto v = value_type {}; - - std::iota(t.begin(), t.end(), v); - std::iota(t2.begin(), t2.end(), v+2); - - BOOST_CHECK( t == t ); - BOOST_CHECK( t != t2 ); - - if(t.empty()) - return; - - BOOST_CHECK( !(t < t) ); - BOOST_CHECK( !(t > t) ); - BOOST_CHECK( t < (t2+t) ); - BOOST_CHECK( (t2+t) > t ); - BOOST_CHECK( t <= (t+t) ); - BOOST_CHECK( (t+t2) >= t ); - BOOST_CHECK( (t2+t2+2) >= t); - BOOST_CHECK( 2*t2 > t ); - BOOST_CHECK( t < 2*t2 ); - BOOST_CHECK( 2*t2 > t); - BOOST_CHECK( 2*t2 >= t2 ); - BOOST_CHECK( t2 <= 2*t2); - BOOST_CHECK( 3*t2 >= t ); - - }; - - for(auto const& e : extents) - check(e); - - auto e0 = extents.at(0); - auto e1 = extents.at(1); - auto e2 = extents.at(2); - - auto b = false; - BOOST_CHECK_NO_THROW (b = tensor_type(e0) == (tensor_type(e0) + tensor_type(e0)) ); - BOOST_CHECK_NO_THROW (b = tensor_type(e1) == (tensor_type(e2) + tensor_type(e2)) ); - BOOST_CHECK_NO_THROW (b = tensor_type(e0) == (tensor_type(e2) + 2) ); - BOOST_CHECK_NO_THROW (b = tensor_type(e1) != (2 + tensor_type(e2)) ); - - BOOST_CHECK_NO_THROW (b = (tensor_type(e0) + tensor_type(e0)) == tensor_type(e0) ); - BOOST_CHECK_NO_THROW (b = (tensor_type(e2) + tensor_type(e2)) == tensor_type(e1) ); - BOOST_CHECK_NO_THROW (b = (tensor_type(e2) + 2) == tensor_type(e0) ); - BOOST_CHECK_NO_THROW (b = (2 + tensor_type(e2)) != tensor_type(e1) ); - - BOOST_CHECK_THROW (b = tensor_type(e1) >= (tensor_type(e2) + tensor_type(e2)), std::runtime_error ); - BOOST_CHECK_THROW (b = tensor_type(e1) <= (tensor_type(e2) + tensor_type(e2)), std::runtime_error ); - BOOST_CHECK_THROW (b = tensor_type(e1) < (tensor_type(e2) + tensor_type(e2)), std::runtime_error ); - BOOST_CHECK_THROW (b = tensor_type(e1) > (tensor_type(e2) + tensor_type(e2)), std::runtime_error ); - - BOOST_CHECK_THROW (b = tensor_type(e1) >= (tensor_type(e2) + 2), std::runtime_error ); - BOOST_CHECK_THROW (b = tensor_type(e1) <= (2 + tensor_type(e2)), std::runtime_error ); - BOOST_CHECK_THROW (b = tensor_type(e1) < (tensor_type(e2) + 3), std::runtime_error ); - BOOST_CHECK_THROW (b = tensor_type(e1) > (4 + tensor_type(e2)), std::runtime_error ); + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ublas::tensor_dynamic; + + + auto check = [](auto const& e) + { + auto t = tensor_type (e); + auto t2 = tensor_type (e); + auto v = value_type {}; + + std::iota(t.begin(), 
t.end(), v); + std::iota(t2.begin(), t2.end(), v+2); + + BOOST_CHECK( t == t ); + BOOST_CHECK( t != t2 ); + + if(t.empty()) + return; + + BOOST_CHECK( !(t < t) ); + BOOST_CHECK( !(t > t) ); + BOOST_CHECK( t < (t2+t) ); + BOOST_CHECK( (t2+t) > t ); + BOOST_CHECK( t <= (t+t) ); + BOOST_CHECK( (t+t2) >= t ); + BOOST_CHECK( (t2+t2+2) >= t); + BOOST_CHECK( 2*t2 > t ); + BOOST_CHECK( t < 2*t2 ); + BOOST_CHECK( 2*t2 > t); + BOOST_CHECK( 2*t2 >= t2 ); + BOOST_CHECK( t2 <= 2*t2); + BOOST_CHECK( 3*t2 >= t ); + + }; + + for(auto const& e : extents) + check(e); + + auto e0 = extents.at(0); + auto e1 = extents.at(1); + auto e2 = extents.at(2); + + auto b = false; + BOOST_CHECK_NO_THROW (b = tensor_type(e0) == (tensor_type(e0) + tensor_type(e0)) ); + BOOST_CHECK_NO_THROW (b = tensor_type(e1) == (tensor_type(e2) + tensor_type(e2)) ); + BOOST_CHECK_NO_THROW (b = tensor_type(e0) == (tensor_type(e2) + 2) ); + BOOST_CHECK_NO_THROW (b = tensor_type(e1) != (2 + tensor_type(e2)) ); + + BOOST_CHECK_NO_THROW (b = (tensor_type(e0) + tensor_type(e0)) == tensor_type(e0) ); + BOOST_CHECK_NO_THROW (b = (tensor_type(e2) + tensor_type(e2)) == tensor_type(e1) ); + BOOST_CHECK_NO_THROW (b = (tensor_type(e2) + 2) == tensor_type(e0) ); + BOOST_CHECK_NO_THROW (b = (2 + tensor_type(e2)) != tensor_type(e1) ); + + BOOST_CHECK_THROW (b = tensor_type(e1) >= (tensor_type(e2) + tensor_type(e2)), std::runtime_error ); + BOOST_CHECK_THROW (b = tensor_type(e1) <= (tensor_type(e2) + tensor_type(e2)), std::runtime_error ); + BOOST_CHECK_THROW (b = tensor_type(e1) < (tensor_type(e2) + tensor_type(e2)), std::runtime_error ); + BOOST_CHECK_THROW (b = tensor_type(e1) > (tensor_type(e2) + tensor_type(e2)), std::runtime_error ); + + BOOST_CHECK_THROW (b = tensor_type(e1) >= (tensor_type(e2) + 2), std::runtime_error ); + BOOST_CHECK_THROW (b = tensor_type(e1) <= (2 + tensor_type(e2)), std::runtime_error ); + BOOST_CHECK_THROW (b = tensor_type(e1) < (tensor_type(e2) + 3), std::runtime_error ); + BOOST_CHECK_THROW (b = tensor_type(e1) > (4 + tensor_type(e2)), std::runtime_error ); } @@ -175,70 +175,70 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison_with_tensor_expressions BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison_with_scalar, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - - - auto check = [](auto const& e) - { - - BOOST_CHECK( tensor_type(e,value_type{2}) == tensor_type(e,value_type{2}) ); - BOOST_CHECK( tensor_type(e,value_type{2}) != tensor_type(e,value_type{1}) ); - - if(e.empty()) - return; - - BOOST_CHECK( !(tensor_type(e,2) < 2) ); - BOOST_CHECK( !(tensor_type(e,2) > 2) ); - BOOST_CHECK( (tensor_type(e,2) >= 2) ); - BOOST_CHECK( (tensor_type(e,2) <= 2) ); - BOOST_CHECK( (tensor_type(e,2) == 2) ); - BOOST_CHECK( (tensor_type(e,2) != 3) ); - - BOOST_CHECK( !(2 > tensor_type(e,2)) ); - BOOST_CHECK( !(2 < tensor_type(e,2)) ); - BOOST_CHECK( (2 <= tensor_type(e,2)) ); - BOOST_CHECK( (2 >= tensor_type(e,2)) ); - BOOST_CHECK( (2 == tensor_type(e,2)) ); - BOOST_CHECK( (3 != tensor_type(e,2)) ); - - BOOST_CHECK( !( tensor_type(e,2)+3 < 5) ); - BOOST_CHECK( !( tensor_type(e,2)+3 > 5) ); - BOOST_CHECK( ( tensor_type(e,2)+3 >= 5) ); - BOOST_CHECK( ( tensor_type(e,2)+3 <= 5) ); - BOOST_CHECK( ( tensor_type(e,2)+3 == 5) ); - BOOST_CHECK( ( tensor_type(e,2)+3 != 6) ); - - - BOOST_CHECK( !( 5 > tensor_type(e,2)+3) ); - BOOST_CHECK( !( 5 < tensor_type(e,2)+3) ); - 
BOOST_CHECK( ( 5 >= tensor_type(e,2)+3) ); - BOOST_CHECK( ( 5 <= tensor_type(e,2)+3) ); - BOOST_CHECK( ( 5 == tensor_type(e,2)+3) ); - BOOST_CHECK( ( 6 != tensor_type(e,2)+3) ); - - - BOOST_CHECK( !( tensor_type(e,2)+tensor_type(e,3) < 5) ); - BOOST_CHECK( !( tensor_type(e,2)+tensor_type(e,3) > 5) ); - BOOST_CHECK( ( tensor_type(e,2)+tensor_type(e,3) >= 5) ); - BOOST_CHECK( ( tensor_type(e,2)+tensor_type(e,3) <= 5) ); - BOOST_CHECK( ( tensor_type(e,2)+tensor_type(e,3) == 5) ); - BOOST_CHECK( ( tensor_type(e,2)+tensor_type(e,3) != 6) ); - - - BOOST_CHECK( !( 5 > tensor_type(e,2)+tensor_type(e,3)) ); - BOOST_CHECK( !( 5 < tensor_type(e,2)+tensor_type(e,3)) ); - BOOST_CHECK( ( 5 >= tensor_type(e,2)+tensor_type(e,3)) ); - BOOST_CHECK( ( 5 <= tensor_type(e,2)+tensor_type(e,3)) ); - BOOST_CHECK( ( 5 == tensor_type(e,2)+tensor_type(e,3)) ); - BOOST_CHECK( ( 6 != tensor_type(e,2)+tensor_type(e,3)) ); - - }; - - for(auto const& e : extents) - check(e); + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ublas::tensor_dynamic; + + + auto check = [](auto const& e) + { + + BOOST_CHECK( tensor_type(e,value_type{2}) == tensor_type(e,value_type{2}) ); + BOOST_CHECK( tensor_type(e,value_type{2}) != tensor_type(e,value_type{1}) ); + + if(ublas::empty(e)) + return; + + BOOST_CHECK( !(tensor_type(e,2) < 2) ); + BOOST_CHECK( !(tensor_type(e,2) > 2) ); + BOOST_CHECK( (tensor_type(e,2) >= 2) ); + BOOST_CHECK( (tensor_type(e,2) <= 2) ); + BOOST_CHECK( (tensor_type(e,2) == 2) ); + BOOST_CHECK( (tensor_type(e,2) != 3) ); + + BOOST_CHECK( !(2 > tensor_type(e,2)) ); + BOOST_CHECK( !(2 < tensor_type(e,2)) ); + BOOST_CHECK( (2 <= tensor_type(e,2)) ); + BOOST_CHECK( (2 >= tensor_type(e,2)) ); + BOOST_CHECK( (2 == tensor_type(e,2)) ); + BOOST_CHECK( (3 != tensor_type(e,2)) ); + + BOOST_CHECK( !( tensor_type(e,2)+3 < 5) ); + BOOST_CHECK( !( tensor_type(e,2)+3 > 5) ); + BOOST_CHECK( ( tensor_type(e,2)+3 >= 5) ); + BOOST_CHECK( ( tensor_type(e,2)+3 <= 5) ); + BOOST_CHECK( ( tensor_type(e,2)+3 == 5) ); + BOOST_CHECK( ( tensor_type(e,2)+3 != 6) ); + + + BOOST_CHECK( !( 5 > tensor_type(e,2)+3) ); + BOOST_CHECK( !( 5 < tensor_type(e,2)+3) ); + BOOST_CHECK( ( 5 >= tensor_type(e,2)+3) ); + BOOST_CHECK( ( 5 <= tensor_type(e,2)+3) ); + BOOST_CHECK( ( 5 == tensor_type(e,2)+3) ); + BOOST_CHECK( ( 6 != tensor_type(e,2)+3) ); + + + BOOST_CHECK( !( tensor_type(e,2)+tensor_type(e,3) < 5) ); + BOOST_CHECK( !( tensor_type(e,2)+tensor_type(e,3) > 5) ); + BOOST_CHECK( ( tensor_type(e,2)+tensor_type(e,3) >= 5) ); + BOOST_CHECK( ( tensor_type(e,2)+tensor_type(e,3) <= 5) ); + BOOST_CHECK( ( tensor_type(e,2)+tensor_type(e,3) == 5) ); + BOOST_CHECK( ( tensor_type(e,2)+tensor_type(e,3) != 6) ); + + + BOOST_CHECK( !( 5 > tensor_type(e,2)+tensor_type(e,3)) ); + BOOST_CHECK( !( 5 < tensor_type(e,2)+tensor_type(e,3)) ); + BOOST_CHECK( ( 5 >= tensor_type(e,2)+tensor_type(e,3)) ); + BOOST_CHECK( ( 5 <= tensor_type(e,2)+tensor_type(e,3)) ); + BOOST_CHECK( ( 5 == tensor_type(e,2)+tensor_type(e,3)) ); + BOOST_CHECK( ( 6 != tensor_type(e,2)+tensor_type(e,3)) ); + + }; + + for(auto const& e : extents) + check(e); } diff --git a/test/tensor/test_static_expression_evaluation.cpp b/test/tensor/test_static_expression_evaluation.cpp index df66b1ee3..7412eef01 100644 --- a/test/tensor/test_static_expression_evaluation.cpp +++ b/test/tensor/test_static_expression_evaluation.cpp @@ -18,42 +18,42 @@ #include 
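The static-extents tests below iterate over a tuple of extents types with for_each_in_tuple, passing both a compile-time index and the element to the callback so that neighbouring extents can be paired (the J == I + 1 checks). A rough sketch of such an index-aware tuple visitor, assuming C++17 fold expressions; the actual helper in test/tensor/utility.hpp may be implemented differently:

// Hypothetical sketch of an index-aware tuple visitor (not the utility.hpp code).
#include <array>
#include <cstddef>
#include <iostream>
#include <tuple>
#include <type_traits>
#include <utility>

template <class Tuple, class Fn, std::size_t... Is>
void for_each_in_tuple_impl(Tuple&& t, Fn&& fn, std::index_sequence<Is...>)
{
    // Call fn(I, element) for every element; I is a compile-time index that
    // converts to std::size_t, so it can be compared at runtime as in the tests.
    (fn(std::integral_constant<std::size_t, Is>{}, std::get<Is>(t)), ...);
}

template <class Tuple, class Fn>
void for_each_in_tuple(Tuple&& t, Fn&& fn)
{
    constexpr auto n = std::tuple_size_v<std::remove_reference_t<Tuple>>;
    for_each_in_tuple_impl(std::forward<Tuple>(t), std::forward<Fn>(fn),
                           std::make_index_sequence<n>{});
}

int main()
{
    // Usage example with plain arrays standing in for extents objects.
    auto extents = std::make_tuple(std::array<std::size_t, 2>{1, 1},
                                   std::array<std::size_t, 3>{4, 2, 3});
    for_each_in_tuple(extents, [](auto I, auto const& e) {
        std::cout << "tuple element " << I << " has rank " << e.size() << '\n';
    });
    return 0;
}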
-BOOST_AUTO_TEST_SUITE(test_static_tensor_expression) +BOOST_AUTO_TEST_SUITE(test_tensor_static_expression) using test_types = zip>::with_t; struct fixture { - template - using extents_type = boost::numeric::ublas::static_extents; - - std::tuple< - extents_type<1,1>, // 1 - extents_type<2,3>, // 2 - extents_type<4,1,3>, // 3 - extents_type<4,2,3>, // 4 - extents_type<4,2,3,5> // 5 + template + using extents_type = boost::numeric::ublas::extents; + + std::tuple< + extents_type<1,1>, // 1 + extents_type<2,3>, // 2 + extents_type<4,1,3>, // 3 + extents_type<4,2,3>, // 4 + extents_type<4,2,3,5> // 5 > extents; }; -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_static_tensor_expression_retrieve_extents, value, test_types, fixture) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_static_expression_retrieve_extents, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - auto uplus1 = std::bind( std::plus{}, std::placeholders::_1, value_type(1) ); - auto uplus2 = std::bind( std::plus{}, value_type(2), std::placeholders::_2 ); + auto uplus1 = [](auto const& a){return a + value_type(1);}; + auto uplus2 = [](auto const& a){return value_type(2) + a;}; auto bplus = std::plus {}; auto bminus = std::minus{}; - for_each_tuple(extents, [&](auto const&, auto& e){ + for_each_in_tuple(extents, [&](auto const& /*unused*/, auto& e){ using extents_type = std::decay_t; - using tensor_type = ublas::static_tensor; + using tensor_type = ublas::tensor_static; - auto t = tensor_type{}; + auto t = tensor_type(); auto v = value_type{}; for(auto& tt: t){ tt = v; v+=value_type{1}; } @@ -82,23 +82,23 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_static_tensor_expression_retrieve_extents }); - for_each_tuple(extents, [&](auto I, auto& e1){ + for_each_in_tuple(extents, [&](auto I, auto& e1){ if ( I >= std::tuple_size_v - 1){ return; } using extents_type1 = std::decay_t; - using tensor_type1 = ublas::static_tensor; + using tensor_type1 = ublas::tensor_static; - for_each_tuple(extents, [&](auto J, auto& e2){ + for_each_in_tuple(extents, [&](auto J, auto& e2){ if( J != I + 1 ){ return; } using extents_type2 = std::decay_t; - using tensor_type2 = ublas::static_tensor; + using tensor_type2 = ublas::tensor_static; auto v = value_type{}; @@ -129,20 +129,20 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_static_tensor_expression_retrieve_extents -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_static_tensor_expression_all_extents_equal, value, test_types, fixture) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_static_expression_all_extents_equal, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - auto uplus1 = std::bind( std::plus{}, std::placeholders::_1, value_type(1) ); - auto uplus2 = std::bind( std::plus{}, value_type(2), std::placeholders::_2 ); + auto uplus1 = [](auto const& a){return a + value_type(1);}; + auto uplus2 = [](auto const& a){return value_type(2) + a;}; auto bplus = std::plus {}; auto bminus = std::minus{}; - for_each_tuple(extents, [&](auto const&, auto& e){ + for_each_in_tuple(extents, [&](auto const& /*unused*/, auto& e){ using extents_type = std::decay_t; - using tensor_type = ublas::static_tensor; + using tensor_type = ublas::tensor_static; auto t = tensor_type{}; @@ -175,23 +175,23 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( 
test_static_tensor_expression_all_extents_equa }); - for_each_tuple(extents, [&](auto I, auto& e1){ + for_each_in_tuple(extents, [&](auto I, auto& e1){ if ( I >= std::tuple_size_v - 1){ return; } using extents_type1 = std::decay_t; - using tensor_type1 = ublas::static_tensor; + using tensor_type1 = ublas::tensor_static; - for_each_tuple(extents, [&](auto J, auto& e2){ + for_each_in_tuple(extents, [&](auto J, auto& e2){ if( J != I + 1 ){ return; } using extents_type2 = std::decay_t; - using tensor_type2 = ublas::static_tensor; + using tensor_type2 = ublas::tensor_static; auto v = value_type{}; @@ -217,4 +217,4 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_static_tensor_expression_all_extents_equa } -BOOST_AUTO_TEST_SUITE_END(); +BOOST_AUTO_TEST_SUITE_END() diff --git a/test/tensor/test_static_extents.cpp b/test/tensor/test_static_extents.cpp index e44040083..c92794886 100644 --- a/test/tensor/test_static_extents.cpp +++ b/test/tensor/test_static_extents.cpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at @@ -10,477 +10,356 @@ // Google and Fraunhofer IOSB, Ettlingen, Germany // -#include +#include #include #include #include "utility.hpp" BOOST_AUTO_TEST_SUITE(test_static_extents) -template -using extents = boost::numeric::ublas::basic_static_extents; -BOOST_AUTO_TEST_CASE(test_static_extents_ctor) { - using namespace boost::numeric; +struct fixture +{ + template + using extents = boost::numeric::ublas::extents; + + extents<> e0 {}; + extents<1> e1 {}; + extents<1, 1> e11 {}; + extents<2, 1> e21 {}; + extents<1, 2> e12 {}; + extents<2, 3> e23 {}; + extents<2, 1, 1> e211 {}; + extents<2, 3, 1> e231 {}; + extents<1, 2, 3> e123 {}; + extents<4, 2, 3> e423 {}; + extents<1, 2, 3, 4> e1234 {}; + extents<4, 2, 1, 3> e4213 {}; + extents<1, 2, 3, 4, 1> e12341 {}; + extents<4, 2, 1, 3, 1> e42131 {}; + extents<1, 4, 2, 1, 3, 1> e142131 {}; +}; - auto e0 = extents<>{}; - BOOST_CHECK(e0.empty()); - BOOST_CHECK_EQUAL(e0.size(), 0); +BOOST_FIXTURE_TEST_CASE(test_extents_static_ctor, fixture, + *boost::unit_test::label("extents_static") *boost::unit_test::label("ctor")) +{ - auto e1 = extents<1, 2>{}; - BOOST_CHECK(!e1.empty()); - BOOST_CHECK_EQUAL(e1.size(), 2); + namespace ublas = boost::numeric::ublas; + + BOOST_CHECK( ublas::empty( e0)); + BOOST_CHECK(! ublas::empty( e1)); + BOOST_CHECK(! ublas::empty( e11)); + BOOST_CHECK(! ublas::empty( e12)); + BOOST_CHECK(! ublas::empty( e21)); + BOOST_CHECK(! ublas::empty( e23)); + BOOST_CHECK(! ublas::empty( e211)); + BOOST_CHECK(! ublas::empty( e123)); + BOOST_CHECK(! ublas::empty( e423)); + BOOST_CHECK(! ublas::empty( e1234)); + BOOST_CHECK(! ublas::empty( e4213)); + BOOST_CHECK(! 
ublas::empty(e142131)); + + BOOST_CHECK_EQUAL( ublas::size( e0),0); + BOOST_CHECK_EQUAL( ublas::size( e1),1); + BOOST_CHECK_EQUAL( ublas::size( e11),2); + BOOST_CHECK_EQUAL( ublas::size( e12),2); + BOOST_CHECK_EQUAL( ublas::size( e21),2); + BOOST_CHECK_EQUAL( ublas::size( e23),2); + BOOST_CHECK_EQUAL( ublas::size( e211),3); + BOOST_CHECK_EQUAL( ublas::size( e123),3); + BOOST_CHECK_EQUAL( ublas::size( e423),3); + BOOST_CHECK_EQUAL( ublas::size( e1234),4); + BOOST_CHECK_EQUAL( ublas::size( e4213),4); + BOOST_CHECK_EQUAL( ublas::size(e142131),6); + + + BOOST_CHECK_EQUAL( ublas::size_v,0); + BOOST_CHECK_EQUAL( ublas::size_v,1); + BOOST_CHECK_EQUAL( ublas::size_v,2); + BOOST_CHECK_EQUAL( ublas::size_v,2); + BOOST_CHECK_EQUAL( ublas::size_v,2); + BOOST_CHECK_EQUAL( ublas::size_v,2); + BOOST_CHECK_EQUAL( ublas::size_v,3); + BOOST_CHECK_EQUAL( ublas::size_v,3); + BOOST_CHECK_EQUAL( ublas::size_v,3); + BOOST_CHECK_EQUAL( ublas::size_v,4); + BOOST_CHECK_EQUAL( ublas::size_v,4); + BOOST_CHECK_EQUAL( ublas::size_v,6); - auto e2 = extents<2, 3>{}; - BOOST_CHECK(!e2.empty()); - BOOST_CHECK_EQUAL(e2.size(), 2); +} - auto e3 = extents<4, 2, 3>{}; // 7 - BOOST_CHECK(!e3.empty()); - BOOST_CHECK_EQUAL(e3.size(), 3); +BOOST_FIXTURE_TEST_CASE(test_extents_static_product, fixture, + *boost::unit_test::label("extents_static") *boost::unit_test::label("product")) +{ + + namespace ublas = boost::numeric::ublas; + + BOOST_CHECK_EQUAL(ublas::product( e0), 0); + //FIXME: BOOST_CHECK_EQUAL(ublas::product( e1), 1); + BOOST_CHECK_EQUAL(ublas::product( e11), 1); + BOOST_CHECK_EQUAL(ublas::product( e12), 2); + BOOST_CHECK_EQUAL(ublas::product( e21), 2); + BOOST_CHECK_EQUAL(ublas::product( e23), 6); + BOOST_CHECK_EQUAL(ublas::product( e211), 2); + BOOST_CHECK_EQUAL(ublas::product( e123), 6); + BOOST_CHECK_EQUAL(ublas::product( e423), 24); + BOOST_CHECK_EQUAL(ublas::product( e1234), 24); + BOOST_CHECK_EQUAL(ublas::product( e4213), 24); + BOOST_CHECK_EQUAL(ublas::product(e142131), 24); + + + BOOST_CHECK_EQUAL(ublas::product_v, 0); + BOOST_CHECK_EQUAL(ublas::product_v, 1); + BOOST_CHECK_EQUAL(ublas::product_v, 1); + BOOST_CHECK_EQUAL(ublas::product_v, 2); + BOOST_CHECK_EQUAL(ublas::product_v, 2); + BOOST_CHECK_EQUAL(ublas::product_v, 6); + BOOST_CHECK_EQUAL(ublas::product_v, 2); + BOOST_CHECK_EQUAL(ublas::product_v, 6); + BOOST_CHECK_EQUAL(ublas::product_v, 24); + BOOST_CHECK_EQUAL(ublas::product_v, 24); + BOOST_CHECK_EQUAL(ublas::product_v, 24); + BOOST_CHECK_EQUAL(ublas::product_v, 24); } -struct fixture { - fixture() = default; - extents<> e0{}; // 0 - extents<1, 2, 3, 4> e1{}; // 1 - extents<1, 2, 3> e2{}; // 2 - extents<4, 2, 3> e3{}; // 3 - extents<4, 2, 1, 3> e4{}; // 4 - extents<1, 4, 2, 1, 3, 1> e5{}; // 5 +BOOST_FIXTURE_TEST_CASE(test_static_extents_access, fixture, + *boost::unit_test::label("extents_static") *boost::unit_test::label("access")) +{ + namespace ublas = boost::numeric::ublas; - std::tuple< - extents<> - > rank_0_extents; + BOOST_REQUIRE_EQUAL( ublas::size_v,0); + BOOST_REQUIRE_EQUAL( ublas::size_v,1); + BOOST_REQUIRE_EQUAL( ublas::size_v,2); + BOOST_REQUIRE_EQUAL( ublas::size_v,2); + BOOST_REQUIRE_EQUAL( ublas::size_v,2); + BOOST_REQUIRE_EQUAL( ublas::size_v,2); + BOOST_REQUIRE_EQUAL( ublas::size_v,3); + BOOST_REQUIRE_EQUAL( ublas::size_v,3); + BOOST_REQUIRE_EQUAL( ublas::size_v,3); + BOOST_REQUIRE_EQUAL( ublas::size_v,4); + BOOST_REQUIRE_EQUAL( ublas::size_v,4); + BOOST_REQUIRE_EQUAL( ublas::size_v,6); - std::tuple< - extents<1>, - extents<2> - > rank_1_extents; + + BOOST_CHECK_EQUAL((ublas::get_v), 
1); + + BOOST_CHECK_EQUAL((ublas::get_v), 1); + BOOST_CHECK_EQUAL((ublas::get_v), 1); + + BOOST_CHECK_EQUAL((ublas::get_v), 1); + BOOST_CHECK_EQUAL((ublas::get_v), 2); + + BOOST_CHECK_EQUAL((ublas::get_v), 2); + BOOST_CHECK_EQUAL((ublas::get_v), 1); + + BOOST_CHECK_EQUAL((ublas::get_v), 2); + BOOST_CHECK_EQUAL((ublas::get_v), 3); + + BOOST_CHECK_EQUAL((ublas::get_v), 2); + BOOST_CHECK_EQUAL((ublas::get_v), 1); + BOOST_CHECK_EQUAL((ublas::get_v), 1); + + BOOST_CHECK_EQUAL((ublas::get_v), 1); + BOOST_CHECK_EQUAL((ublas::get_v), 2); + BOOST_CHECK_EQUAL((ublas::get_v), 3); + + BOOST_CHECK_EQUAL((ublas::get_v), 4); + BOOST_CHECK_EQUAL((ublas::get_v), 2); + BOOST_CHECK_EQUAL((ublas::get_v), 3); + + BOOST_CHECK_EQUAL((ublas::get_v), 1); + BOOST_CHECK_EQUAL((ublas::get_v), 2); + BOOST_CHECK_EQUAL((ublas::get_v), 3); + BOOST_CHECK_EQUAL((ublas::get_v), 4); + + BOOST_CHECK_EQUAL((ublas::get_v), 4); + BOOST_CHECK_EQUAL((ublas::get_v), 2); + BOOST_CHECK_EQUAL((ublas::get_v), 1); + BOOST_CHECK_EQUAL((ublas::get_v), 3); + + //FIXME: BOOST_CHECK_EQUAL(e1 [0], 1); + + BOOST_CHECK_EQUAL(e11[0], 1); + BOOST_CHECK_EQUAL(e11[1], 1); + + BOOST_CHECK_EQUAL(e12[0], 1); + BOOST_CHECK_EQUAL(e12[1], 2); + + BOOST_CHECK_EQUAL(e21[0], 2); + BOOST_CHECK_EQUAL(e21[1], 1); + + BOOST_CHECK_EQUAL(e23[0], 2); + BOOST_CHECK_EQUAL(e23[1], 3); + + BOOST_CHECK_EQUAL(e211[0], 2); + BOOST_CHECK_EQUAL(e211[1], 1); + BOOST_CHECK_EQUAL(e211[2], 1); + + BOOST_CHECK_EQUAL(e123[0], 1); + BOOST_CHECK_EQUAL(e123[1], 2); + BOOST_CHECK_EQUAL(e123[2], 3); + + BOOST_CHECK_EQUAL(e423[0], 4); + BOOST_CHECK_EQUAL(e423[1], 2); + BOOST_CHECK_EQUAL(e423[2], 3); + + BOOST_CHECK_EQUAL(e1234[0], 1); + BOOST_CHECK_EQUAL(e1234[1], 2); + BOOST_CHECK_EQUAL(e1234[2], 3); + BOOST_CHECK_EQUAL(e1234[3], 4); + + BOOST_CHECK_EQUAL(e4213[0], 4); + BOOST_CHECK_EQUAL(e4213[1], 2); + BOOST_CHECK_EQUAL(e4213[2], 1); + BOOST_CHECK_EQUAL(e4213[3], 3); +} + +struct fixture_second +{ + template + using extents = boost::numeric::ublas::extents; std::tuple< - extents<1,1>, - extents<2,2> - > rank_2_extents; + extents<> + > empty; std::tuple< - extents<1>, + //FIXME: extents<1>, extents<1,1>, extents<1,1,1>, extents<1,1,1,1> - > scalars; + > scalars; std::tuple< extents<1,2>, - extents<1,3,1>, + extents<2,1>, + extents<1,2,1>, + extents<2,1,1>, extents<1,4,1,1>, - extents<5,1,1,1,1>, - extents<6,1,1,1,1,1> - > vectors; + extents<5,1,1,1,1> + > vectors; std::tuple< extents<2,3>, extents<3,2,1>, extents<4,4,1,1>, extents<6,6,1,1,1,1> - > matrices; + > matrices; std::tuple< - extents<3,3,3>, - extents<4,4,4,1>, - extents<5,5,5,1,1>, + extents<1,2,3>, + extents<1,2,3>, + extents<1,2,3,1>, + extents<4,2,3>, + extents<4,2,3,1>, + extents<4,2,3,1,1>, extents<6,6,6,1,1,1>, extents<6,6,1,1,1,6> - > tensors; + > tensors; }; -BOOST_FIXTURE_TEST_CASE(test_static_extents_product, fixture, - *boost::unit_test::label("static_extents") * - boost::unit_test::label("product")) { - - using namespace boost::numeric::ublas; - - auto p0 = product(e0); // {} - auto p1 = product(e1); // {1,2,3,4} - auto p2 = product(e2); // {1,2,3} - auto p3 = product(e3); // {4,2,3} - auto p4 = product(e4); // {4,2,1,3} - auto p5 = product(e5); // {1,4,2,1,3,1} - - auto sp0 = product(e0); // {} - auto sp1 = product(e1); // {1,2,3,4} - auto sp2 = product(e2); // {1,2,3} - auto sp3 = product(e3); // {4,2,3} - auto sp4 = product(e4); // {4,2,1,3} - auto sp5 = product(e5); // {1,4,2,1,3,1} - - BOOST_CHECK_EQUAL(p0, 0); - BOOST_CHECK_EQUAL(p1, 24); - BOOST_CHECK_EQUAL(p2, 6); - BOOST_CHECK_EQUAL(p3, 24); - 
BOOST_CHECK_EQUAL(p4, 24); - BOOST_CHECK_EQUAL(p5, 24); - - BOOST_CHECK_EQUAL(sp0, 0); - BOOST_CHECK_EQUAL(sp1, 24); - BOOST_CHECK_EQUAL(sp2, 6); - BOOST_CHECK_EQUAL(sp3, 24); - BOOST_CHECK_EQUAL(sp4, 24); - BOOST_CHECK_EQUAL(sp5, 24); -} -BOOST_FIXTURE_TEST_CASE(test_static_extents_access, fixture, - *boost::unit_test::label("static_extents") * - boost::unit_test::label("access")) { - using namespace boost::numeric; - - BOOST_CHECK_EQUAL(e0.size(), 0); - BOOST_CHECK(e0.empty()); - - BOOST_REQUIRE_EQUAL(e1.size(), 4); - BOOST_REQUIRE_EQUAL(e2.size(), 3); - BOOST_REQUIRE_EQUAL(e3.size(), 3); - BOOST_REQUIRE_EQUAL(e4.size(), 4); - BOOST_REQUIRE_EQUAL(e5.size(), 6); - - BOOST_CHECK_EQUAL(e1[0], 1); - BOOST_CHECK_EQUAL(e1[1], 2); - BOOST_CHECK_EQUAL(e1[2], 3); - BOOST_CHECK_EQUAL(e1[3], 4); - - BOOST_CHECK_EQUAL(e2[0], 1); - BOOST_CHECK_EQUAL(e2[1], 2); - BOOST_CHECK_EQUAL(e2[2], 3); - - BOOST_CHECK_EQUAL(e3[0], 4); - BOOST_CHECK_EQUAL(e3[1], 2); - BOOST_CHECK_EQUAL(e3[2], 3); - - BOOST_CHECK_EQUAL(e4[0], 4); - BOOST_CHECK_EQUAL(e4[1], 2); - BOOST_CHECK_EQUAL(e4[2], 1); - BOOST_CHECK_EQUAL(e4[3], 3); - - BOOST_CHECK_EQUAL(e5[0], 1); - BOOST_CHECK_EQUAL(e5[1], 4); - BOOST_CHECK_EQUAL(e5[2], 2); - BOOST_CHECK_EQUAL(e5[3], 1); - BOOST_CHECK_EQUAL(e5[4], 3); - BOOST_CHECK_EQUAL(e5[5], 1); -} +BOOST_FIXTURE_TEST_CASE(test_static_extents, fixture_second, + *boost::unit_test::label("extents_static") *boost::unit_test::label("is_scalar_vector_matrix_tensor")) { -BOOST_FIXTURE_TEST_CASE(test_static_extents, fixture, - *boost::unit_test::label("static_extents") * - boost::unit_test::label("query")) { - - using namespace boost::numeric::ublas; - // e0 ==> {} - // e1 ==> {0,0,0,0} - // e2 ==> {1,2,3} - // e3 ==> {4,2,3} - // e4 ==> {4,2,1,3} - // e5 ==> {1,4,2,1,3,1} - - BOOST_CHECK( e0.empty( )); - BOOST_CHECK( !is_scalar( e0)); - BOOST_CHECK( !is_vector( e0)); - BOOST_CHECK( !is_matrix( e0)); - BOOST_CHECK( !is_tensor( e0)); - BOOST_CHECK( !is_scalar( e0)); - BOOST_CHECK( !is_vector( e0)); - BOOST_CHECK( !is_matrix( e0)); - BOOST_CHECK( !is_tensor( e0)); - - BOOST_CHECK( ! e1.empty( ) ); - BOOST_CHECK( !is_scalar( e1) ); - BOOST_CHECK( !is_vector( e1) ); - BOOST_CHECK( !is_matrix( e1) ); - BOOST_CHECK( is_tensor( e1) ); - BOOST_CHECK( !is_scalar( e1) ); - BOOST_CHECK( !is_vector( e1) ); - BOOST_CHECK( !is_matrix( e1) ); - BOOST_CHECK( is_tensor( e1) ); - - BOOST_CHECK( ! e2.empty( ) ); - BOOST_CHECK( !is_scalar( e2) ); - BOOST_CHECK( !is_vector( e2) ); - BOOST_CHECK( !is_matrix( e2) ); - BOOST_CHECK( is_tensor( e2) ); - BOOST_CHECK( !is_scalar( e2) ); - BOOST_CHECK( !is_vector( e2) ); - BOOST_CHECK( !is_matrix( e2) ); - BOOST_CHECK( is_tensor( e2) ); - - BOOST_CHECK( ! e3.empty( ) ); - BOOST_CHECK( !is_scalar( e3) ); - BOOST_CHECK( !is_vector( e3) ); - BOOST_CHECK( !is_matrix( e3) ); - BOOST_CHECK( is_tensor( e3) ); - BOOST_CHECK( !is_scalar( e3) ); - BOOST_CHECK( !is_vector( e3) ); - BOOST_CHECK( !is_matrix( e3) ); - BOOST_CHECK( is_tensor( e3) ); - - BOOST_CHECK( ! e4.empty( ) ); - BOOST_CHECK( !is_scalar( e4) ); - BOOST_CHECK( !is_vector( e4) ); - BOOST_CHECK( !is_matrix( e4) ); - BOOST_CHECK( is_tensor( e4) ); - BOOST_CHECK( !is_scalar( e4) ); - BOOST_CHECK( !is_vector( e4) ); - BOOST_CHECK( !is_matrix( e4) ); - BOOST_CHECK( is_tensor( e4) ); - - BOOST_CHECK( ! 
e5.empty( ) ); - BOOST_CHECK( !is_scalar( e5) ); - BOOST_CHECK( !is_vector( e5) ); - BOOST_CHECK( !is_matrix( e5) ); - BOOST_CHECK( is_tensor( e5) ); - BOOST_CHECK( !is_scalar( e5) ); - BOOST_CHECK( !is_vector( e5) ); - BOOST_CHECK( !is_matrix( e5) ); - BOOST_CHECK( is_tensor( e5) ); - - boost::numeric::ublas::basic_static_extents e14; - BOOST_CHECK( ! e14.empty( ) ); - BOOST_CHECK( ! is_scalar(e14) ); - BOOST_CHECK( is_vector(e14) ); - BOOST_CHECK( ! is_matrix(e14) ); - BOOST_CHECK( ! is_tensor(e14) ); - BOOST_CHECK( ! is_scalar(e14) ); - BOOST_CHECK( is_vector(e14) ); - BOOST_CHECK( ! is_matrix(e14) ); - BOOST_CHECK( ! is_tensor(e14) ); - - - for_each_tuple(rank_0_extents,[](auto const&, auto& e){ - BOOST_CHECK( !is_scalar(e) ); - BOOST_CHECK( !is_vector(e) ); - BOOST_CHECK( !is_matrix(e) ); - BOOST_CHECK( !is_tensor(e) ); - BOOST_CHECK( !is_scalar(e) ); - BOOST_CHECK( !is_vector(e) ); - BOOST_CHECK( !is_matrix(e) ); - BOOST_CHECK( !is_tensor(e) ); - }); + namespace ublas = boost::numeric::ublas; + for_each_in_tuple(scalars,[](auto const& /*unused*/, auto const& e){ + BOOST_CHECK( ublas::is_scalar(e) ); + BOOST_CHECK( ublas::is_vector(e) ); + BOOST_CHECK( ublas::is_matrix(e) ); + BOOST_CHECK( !ublas::is_tensor(e) ); - for_each_tuple(rank_1_extents,[](auto const& I, auto const& e){ - if( I == 0 ){ - BOOST_CHECK( is_scalar(e) ); - BOOST_CHECK( !is_vector(e) ); - BOOST_CHECK( !is_matrix(e) ); - BOOST_CHECK( !is_tensor(e) ); - BOOST_CHECK( is_scalar(e) ); - BOOST_CHECK( !is_vector(e) ); - BOOST_CHECK( !is_matrix(e) ); - BOOST_CHECK( !is_tensor(e) ); - }else{ - BOOST_CHECK( !is_scalar(e) ); - BOOST_CHECK( is_vector(e) ); - BOOST_CHECK( !is_matrix(e) ); - BOOST_CHECK( !is_tensor(e) ); - BOOST_CHECK( !is_scalar(e) ); - BOOST_CHECK( is_vector(e) ); - BOOST_CHECK( !is_matrix(e) ); - BOOST_CHECK( !is_tensor(e) ); - } - }); + BOOST_CHECK( ublas::is_scalar_v); + BOOST_CHECK( ublas::is_vector_v); + BOOST_CHECK( ublas::is_matrix_v); + BOOST_CHECK( !ublas::is_tensor_v); - for_each_tuple(rank_2_extents,[](auto const& I, auto const& e){ - if( I == 0 ){ - BOOST_CHECK( is_scalar(e) ); - BOOST_CHECK( !is_vector(e) ); - BOOST_CHECK( !is_matrix(e) ); - BOOST_CHECK( !is_tensor(e) ); - BOOST_CHECK( is_scalar(e) ); - BOOST_CHECK( !is_vector(e) ); - BOOST_CHECK( !is_matrix(e) ); - BOOST_CHECK( !is_tensor(e) ); - }else{ - BOOST_CHECK( !is_scalar(e) ); - BOOST_CHECK( !is_vector(e) ); - BOOST_CHECK( is_matrix(e) ); - BOOST_CHECK( !is_tensor(e) ); - BOOST_CHECK( !is_scalar(e) ); - BOOST_CHECK( !is_vector(e) ); - BOOST_CHECK( is_matrix(e) ); - BOOST_CHECK( !is_tensor(e) ); - } }); - for_each_tuple(scalars,[](auto const&, auto& e){ - BOOST_CHECK( is_scalar(e) ); - BOOST_CHECK( !is_vector(e) ); - BOOST_CHECK( !is_matrix(e) ); - BOOST_CHECK( !is_tensor(e) ); - BOOST_CHECK( is_scalar(e) ); - BOOST_CHECK( !is_vector(e) ); - BOOST_CHECK( !is_matrix(e) ); - BOOST_CHECK( !is_tensor(e) ); - }); + for_each_in_tuple(vectors,[](auto const& /*unused*/, auto& e){ + BOOST_CHECK( !ublas::is_scalar(e) ); + BOOST_CHECK( ublas::is_vector(e) ); + BOOST_CHECK( ublas::is_matrix(e) ); + BOOST_CHECK( !ublas::is_tensor(e) ); - for_each_tuple(vectors,[](auto const&, auto& e){ - BOOST_CHECK( !is_scalar(e) ); - BOOST_CHECK( is_vector(e) ); - BOOST_CHECK( !is_matrix(e) ); - BOOST_CHECK( !is_tensor(e) ); - BOOST_CHECK( !is_scalar(e) ); - BOOST_CHECK( is_vector(e) ); - BOOST_CHECK( !is_matrix(e) ); - BOOST_CHECK( !is_tensor(e) ); + BOOST_CHECK( !ublas::is_scalar_v); + BOOST_CHECK( ublas::is_vector_v); + BOOST_CHECK( ublas::is_matrix_v); + 
BOOST_CHECK( !ublas::is_tensor_v); }); - for_each_tuple(matrices,[](auto const&, auto& e){ - BOOST_CHECK( !is_scalar(e) ); - BOOST_CHECK( !is_vector(e) ); - BOOST_CHECK( is_matrix(e) ); - BOOST_CHECK( !is_tensor(e) ); - BOOST_CHECK( !is_scalar(e) ); - BOOST_CHECK( !is_vector(e) ); - BOOST_CHECK( is_matrix(e) ); - BOOST_CHECK( !is_tensor(e) ); - - }); + for_each_in_tuple(matrices,[](auto const& /*unused*/, auto& e){ + BOOST_CHECK( !ublas::is_scalar(e) ); + BOOST_CHECK( !ublas::is_vector(e) ); + BOOST_CHECK( ublas::is_matrix(e) ); + BOOST_CHECK( !ublas::is_tensor(e) ); - for_each_tuple(tensors,[](auto const&, auto& e){ - BOOST_CHECK( !is_scalar(e) ); - BOOST_CHECK( !is_vector(e) ); - BOOST_CHECK( !is_matrix(e) ); - BOOST_CHECK( is_tensor(e) ); - BOOST_CHECK( !is_scalar(e) ); - BOOST_CHECK( !is_vector(e) ); - BOOST_CHECK( !is_matrix(e) ); - BOOST_CHECK( is_tensor(e) ); + BOOST_CHECK( !ublas::is_scalar_v); + BOOST_CHECK( !ublas::is_vector_v); + BOOST_CHECK( ublas::is_matrix_v); + BOOST_CHECK( !ublas::is_tensor_v); }); -} + for_each_in_tuple(tensors,[](auto const& /*unused*/, auto& e){ + BOOST_CHECK( !ublas::is_scalar(e) ); + BOOST_CHECK( !ublas::is_vector(e) ); + BOOST_CHECK( !ublas::is_matrix(e) ); + BOOST_CHECK( ublas::is_tensor(e) ); -BOOST_FIXTURE_TEST_CASE(test_static_extents_valid, fixture, *boost::unit_test::label("static_extents") *boost::unit_test::label("valid")) -{ - using namespace boost::numeric::ublas; - for_each_tuple(rank_0_extents,[](auto const&, auto& e){ - BOOST_CHECK(!is_valid(e)); - BOOST_CHECK(!is_valid(e)); + BOOST_CHECK( !ublas::is_scalar_v); + BOOST_CHECK( !ublas::is_vector_v); + BOOST_CHECK( !ublas::is_matrix_v); + BOOST_CHECK( ublas::is_tensor_v); }); - for_each_tuple(rank_1_extents,[](auto const& I, auto const& e){ - if( I== 0 ){ - BOOST_CHECK(is_valid(e)); - }else{ - BOOST_CHECK(!is_valid(e)); - BOOST_CHECK(!is_valid(e)); - } - }); - - for_each_tuple(rank_2_extents,[](auto const&, auto& e){ - BOOST_CHECK(is_valid(e)); - BOOST_CHECK(is_valid(e)); - }); - - for_each_tuple(scalars,[](auto const&, auto& e){ - BOOST_CHECK(is_valid(e)); - BOOST_CHECK(is_valid(e)); - }); - - for_each_tuple(vectors,[](auto const&, auto& e){ - BOOST_CHECK(is_valid(e)); - BOOST_CHECK(is_valid(e)); - }); - - for_each_tuple(matrices,[](auto const&, auto& e){ - BOOST_CHECK(is_valid(e)); - BOOST_CHECK(is_valid(e)); - }); - - for_each_tuple(tensors,[](auto const&, auto& e){ - BOOST_CHECK(is_valid(e)); - BOOST_CHECK(is_valid(e)); - }); } -BOOST_FIXTURE_TEST_CASE(test_static_extents_comparsion_operator, fixture, *boost::unit_test::label("static_extents") *boost::unit_test::label("comparsion_operator")) +BOOST_FIXTURE_TEST_CASE(test_static_extents_valid, fixture_second, + *boost::unit_test::label("extents_extents") *boost::unit_test::label("valid")) { + namespace ublas = boost::numeric::ublas; - auto const compare_extents = [](auto const& e1, auto const& e2){ - if(e1.size() != e2.size()) return false; - for(auto i = 0ul ; i < e1.size(); i++){ - if(e1[i] != e2[i]){ - return false; - } - } - return true; - }; - - for_each_tuple(rank_0_extents,[&](auto const&, auto const& e1){ - for_each_tuple(rank_1_extents,[&](auto const&, auto const& e2){ - BOOST_CHECK(compare_extents(e1,e2) == (e1 == e2)); - }); - }); +//FIXME: BOOST_CHECK(!ublas::is_valid (extents<0>{}) ); +//FIXME: BOOST_CHECK( ublas::is_valid (extents<2>{}) ); +//FIXME: BOOST_CHECK( ublas::is_valid (extents<3>{}) ); - for_each_tuple(rank_1_extents,[&](auto const&, auto const& e1){ - for_each_tuple(rank_1_extents,[&](auto const&, auto const& e2){ 
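// A minimal illustration, not taken from the patch itself, of the classification
// semantics exercised above: after the refactor the predicates are cumulative
// rather than mutually exclusive. An all-one shape passes is_scalar, is_vector
// and is_matrix; a vector-like shape also passes is_matrix; is_tensor holds only
// when some extent past the second position is greater than one. The is_*_v
// spellings and the header path are assumptions based on the hunks above.
#include <boost/numeric/ublas/tensor.hpp>   // header path assumed
namespace classification_sketch {
  namespace ublas = boost::numeric::ublas;
  static_assert( ublas::is_scalar_v<ublas::extents<1,1>>);
  static_assert( ublas::is_vector_v<ublas::extents<1,1>>);   // a scalar shape is also a vector ...
  static_assert( ublas::is_matrix_v<ublas::extents<1,1>>);   // ... and a matrix
  static_assert(!ublas::is_tensor_v<ublas::extents<1,1>>);
  static_assert( ublas::is_vector_v<ublas::extents<2,1>>);
  static_assert( ublas::is_matrix_v<ublas::extents<2,3>>);
  static_assert(!ublas::is_matrix_v<ublas::extents<4,2,3>>);
  static_assert( ublas::is_tensor_v<ublas::extents<4,2,3>>);
}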
- BOOST_CHECK(compare_extents(e1,e2) == (e1 == e2)); - }); - }); + BOOST_CHECK(!ublas::is_valid_v> ); + BOOST_CHECK( ublas::is_valid_v> ); + BOOST_CHECK( ublas::is_valid_v> ); - for_each_tuple(rank_1_extents,[&](auto const&, auto const& e1){ - for_each_tuple(rank_2_extents,[&](auto const&, auto const& e2){ - BOOST_CHECK(compare_extents(e1,e2) == (e1 == e2)); - }); - }); - - for_each_tuple(scalars,[&](auto const&, auto const& e1){ - for_each_tuple(scalars,[&](auto const&, auto const& e2){ - BOOST_CHECK(compare_extents(e1,e2) == (e1 == e2)); - }); - }); - - for_each_tuple(scalars,[&](auto const&, auto const& e1){ - for_each_tuple(vectors,[&](auto const&, auto const& e2){ - BOOST_CHECK(compare_extents(e1,e2) == (e1 == e2)); - }); - }); - for_each_tuple(scalars,[&](auto const&, auto const& e1){ - for_each_tuple(matrices,[&](auto const&, auto const& e2){ - BOOST_CHECK(compare_extents(e1,e2) == (e1 == e2)); - }); - }); + for_each_in_tuple(scalars ,[](auto const& /*unused*/, auto& e){ BOOST_CHECK( ublas::is_valid (e) ); }); + for_each_in_tuple(vectors ,[](auto const& /*unused*/, auto& e){ BOOST_CHECK( ublas::is_valid (e) ); }); + for_each_in_tuple(matrices ,[](auto const& /*unused*/, auto& e){ BOOST_CHECK( ublas::is_valid (e) ); }); + for_each_in_tuple(tensors ,[](auto const& /*unused*/, auto& e){ BOOST_CHECK( ublas::is_valid (e) ); }); - for_each_tuple(scalars,[&](auto const&, auto const& e1){ - for_each_tuple(tensors,[&](auto const&, auto const& e2){ - BOOST_CHECK(compare_extents(e1,e2) == (e1 == e2)); - }); - }); + for_each_in_tuple(scalars ,[](auto const& /*unused*/, auto& e){ BOOST_CHECK( ublas::is_valid_v ); }); + for_each_in_tuple(vectors ,[](auto const& /*unused*/, auto& e){ BOOST_CHECK( ublas::is_valid_v ); }); + for_each_in_tuple(matrices ,[](auto const& /*unused*/, auto& e){ BOOST_CHECK( ublas::is_valid_v ); }); + for_each_in_tuple(tensors ,[](auto const& /*unused*/, auto& e){ BOOST_CHECK( ublas::is_valid_v ); }); } -BOOST_FIXTURE_TEST_CASE(test_static_extents_squeeze, fixture, *boost::unit_test::label("static_extents") *boost::unit_test::label("squeeze")) -{ - auto e_sq2 = squeeze(e2 ) ;//==> {2,3} - auto e_sq3 = squeeze(e3 ) ;//==> {4,2,3} - auto e_sq4 = squeeze(e4 ) ;//==> {4,2,3} - auto e_sq5 = squeeze(e5 ) ;//==> {4,2,3} - - BOOST_CHECK( (e_sq2 == extents<2,3>{}) ); - BOOST_CHECK( (e_sq3 == extents<4,2,3>{}) ); - - BOOST_CHECK( (e_sq4 == extents<4,2,3>{}) ); - BOOST_CHECK( (e_sq5 == extents<4,2,3>{}) ); -} - -BOOST_AUTO_TEST_CASE(test_static_extents_exception) +BOOST_FIXTURE_TEST_CASE(test_static_extents_comparsion_operator, fixture, + *boost::unit_test::label("extents_static") *boost::unit_test::label("equals")) { - using namespace boost::numeric::ublas; - - basic_static_extents e1; - for(auto i = e1.size(); i < 3; i++){ - BOOST_REQUIRE_THROW( (void)e1.at(i),std::out_of_range ); - } - - BOOST_REQUIRE_THROW((void)e1.at(std::numeric_limits::max()),std::out_of_range); + namespace ublas = boost::numeric::ublas; + + BOOST_CHECK( e0 == e0 ); + BOOST_CHECK( e1 == e1 ); + BOOST_CHECK( e11 == e11 ); + BOOST_CHECK( e21 == e21 ); + BOOST_CHECK( e12 == e12 ); + BOOST_CHECK( e23 == e23 ); + BOOST_CHECK( e231 == e231 ); + BOOST_CHECK( e211 == e211 ); + BOOST_CHECK( e123 == e123 ); + BOOST_CHECK( e423 == e423 ); + BOOST_CHECK( e1234 == e1234 ); + BOOST_CHECK( e4213 == e4213 ); + BOOST_CHECK( e142131 == e142131 ); } diff --git a/test/tensor/test_static_operators_arithmetic.cpp b/test/tensor/test_static_operators_arithmetic.cpp index 97a75cc5d..71e1447ad 100644 --- 
a/test/tensor/test_static_operators_arithmetic.cpp +++ b/test/tensor/test_static_operators_arithmetic.cpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at @@ -18,7 +18,7 @@ #include #include "utility.hpp" -BOOST_AUTO_TEST_SUITE(test_static_tensor_arithmetic_operations) +BOOST_AUTO_TEST_SUITE(test_tensor_static_arithmetic_operations) using double_extended = boost::multiprecision::cpp_bin_float_double_extended; @@ -27,9 +27,9 @@ using test_types = zip::with_t - using extents_type = boost::numeric::ublas::static_extents; + using extents_type = boost::numeric::ublas::extents; - fixture() {} + fixture() = default; std::tuple< extents_type<1,1>, // 1 @@ -44,15 +44,15 @@ struct fixture BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_binary_arithmetic_operations, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - auto check = [](auto const&, auto& e) + auto check = [](auto const& /*unused*/, auto& e) { using extents_type = std::decay_t; - using tensor_type = ublas::static_tensor; + using tensor_type = ublas::tensor_static; auto t = tensor_type (); auto t2 = tensor_type (); auto r = tensor_type (); @@ -99,22 +99,22 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_binary_arithmetic_operations, valu BOOST_CHECK_EQUAL ( r(i), 1 ); }; - for_each_tuple(extents,check); + for_each_in_tuple(extents,check); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_unary_arithmetic_operations, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - auto check = [](auto const&, auto& e) + auto check = [](auto const& /*unused*/, auto& e) { using extents_type = std::decay_t; - using tensor_type = ublas::static_tensor; + using tensor_type = ublas::tensor_static; auto t = tensor_type (); auto t2 = tensor_type (); auto v = value_type {}; @@ -154,7 +154,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_unary_arithmetic_operations, value }; - for_each_tuple(extents,check); + for_each_in_tuple(extents,check); } @@ -163,15 +163,15 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_unary_arithmetic_operations, value BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_assign_arithmetic_operations, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - auto check = [](auto const&, auto& e) + auto check = [](auto const& /*unused*/, auto& e) { using extents_type = std::decay_t; - using tensor_type = ublas::static_tensor; + using tensor_type = ublas::tensor_static; auto t = tensor_type (); auto t2 = tensor_type (); auto r = tensor_type (); @@ -235,7 +235,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_assign_arithmetic_operations, valu BOOST_CHECK_EQUAL ( p(i), r(i) ); }; - for_each_tuple(extents,check); + for_each_in_tuple(extents,check); } diff --git a/test/tensor/test_static_operators_comparison.cpp b/test/tensor/test_static_operators_comparison.cpp index 
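// A minimal illustration, not taken from the patch itself: throughout these
// test hunks the old static_tensor spelling is replaced by tensor_static. The
// template parameter order (value type, static extents, layout) and the header
// path are assumptions inferred from the surrounding hunks; size() is the
// product of the extents and rank() their number, as the constructor tests
// further below check.
#include <cassert>
#include <boost/numeric/ublas/tensor.hpp>   // header path assumed
namespace tensor_static_sketch {
  namespace ublas = boost::numeric::ublas;
  using tensor423 = ublas::tensor_static<float, ublas::extents<4,2,3>, ublas::layout::first_order>;
  inline void check()
  {
    auto t = tensor423{};        // extents are part of the type, no run-time shape argument
    assert(t.size() == 4*2*3);   // 24 elements
    assert(t.rank() == 3);
  }
}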
045cea23a..9010482d0 100644 --- a/test/tensor/test_static_operators_comparison.cpp +++ b/test/tensor/test_static_operators_comparison.cpp @@ -17,17 +17,18 @@ #include #include "utility.hpp" -BOOST_AUTO_TEST_SUITE(test_static_tensor_comparison) +BOOST_AUTO_TEST_SUITE(test_tensor_static_comparison) using double_extended = boost::multiprecision::cpp_bin_float_double_extended; using test_types = zip::with_t; struct fixture { + template - using extents_type = boost::numeric::ublas::static_extents; + using extents_type = boost::numeric::ublas::extents; - fixture() {} + fixture()= default; std::tuple< extents_type<1,1>, // 1 @@ -41,14 +42,14 @@ struct fixture { BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - auto check = [](auto const&, auto& e) + auto check = [](auto const& /*unused*/, auto& e) { using extents_type = std::decay_t; - using tensor_type = ublas::static_tensor; + using tensor_type = ublas::tensor_static; auto t = tensor_type (); auto t2 = tensor_type (); auto v = value_type {}; @@ -74,22 +75,22 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison, value, test_types, fi BOOST_CHECK( t2 >= t ); }; - for_each_tuple(extents,check); + for_each_in_tuple(extents,check); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison_with_tensor_expressions, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - auto check = [](auto const&, auto& e) + auto check = [](auto const& /*unused*/, auto& e) { using extents_type = std::decay_t; - using tensor_type = ublas::static_tensor; + using tensor_type = ublas::tensor_static; auto t = tensor_type (); auto t2 = tensor_type (); @@ -120,7 +121,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison_with_tensor_expressions }; - for_each_tuple(extents,check); + for_each_in_tuple(extents,check); } @@ -128,20 +129,20 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison_with_tensor_expressions BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison_with_scalar, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - auto check = [](auto const&, auto& e) + for_each_in_tuple(extents,[](auto const& /*unused*/, auto& e) { using extents_type = std::decay_t; - using tensor_type = ublas::static_tensor; + using tensor_type = ublas::tensor_static; BOOST_CHECK( tensor_type(value_type{2}) == tensor_type(value_type{2}) ); BOOST_CHECK( tensor_type(value_type{2}) != tensor_type(value_type{1}) ); - if(e.empty()) + if(ublas::empty(e)) return; BOOST_CHECK( !(tensor_type(2) < 2) ); @@ -189,9 +190,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison_with_scalar, value, te BOOST_CHECK( ( 5 == tensor_type(2)+tensor_type(3)) ); BOOST_CHECK( ( 6 != tensor_type(2)+tensor_type(3)) ); - }; - - for_each_tuple(extents,check); + }); } diff --git a/test/tensor/test_static_strides.cpp b/test/tensor/test_static_strides.cpp index 72b11c58c..5f5a203e9 100644 --- a/test/tensor/test_static_strides.cpp +++ b/test/tensor/test_static_strides.cpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit 
Singh, amitsingh19975@gmail.com +// Copyright (c) 2020, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at @@ -10,10 +10,7 @@ // Google and Fraunhofer IOSB, Ettlingen, Germany // -#include -#include -#include -#include +#include #include BOOST_AUTO_TEST_SUITE(test_static_strides) @@ -21,137 +18,139 @@ BOOST_AUTO_TEST_SUITE(test_static_strides) using test_types = std::tuple; -template -using extents_type = boost::numeric::ublas::basic_static_extents; -template -using strides_type = boost::numeric::ublas::strides_t; +template +using extents = boost::numeric::ublas::extents; + +using first_order = boost::numeric::ublas::layout::first_order; +using last_order = boost::numeric::ublas::layout::last_order; + +BOOST_AUTO_TEST_CASE_TEMPLATE(test_static_strides_ctor, value, test_types) +{ + namespace ublas = boost::numeric::ublas; + + constexpr auto s11 = ublas::to_strides_v,first_order>; + constexpr auto s12 = ublas::to_strides_v,first_order>; + constexpr auto s21 = ublas::to_strides_v,first_order>; + constexpr auto s23 = ublas::to_strides_v,first_order>; + constexpr auto s231 = ublas::to_strides_v,first_order>; + constexpr auto s123 = ublas::to_strides_v,first_order>; + constexpr auto s423 = ublas::to_strides_v,first_order>; + + BOOST_CHECK_EQUAL(s11.empty(), false); + BOOST_CHECK_EQUAL(s12.empty(), false); + BOOST_CHECK_EQUAL(s21.empty(), false); + BOOST_CHECK_EQUAL(s23.empty(), false); + BOOST_CHECK_EQUAL(s231.empty(), false); + BOOST_CHECK_EQUAL(s123.empty(), false); + BOOST_CHECK_EQUAL(s423.empty(), false); + + BOOST_CHECK_EQUAL(s11.size(), 2); + BOOST_CHECK_EQUAL(s12.size(), 2); + BOOST_CHECK_EQUAL(s21.size(), 2); + BOOST_CHECK_EQUAL(s23.size(), 2); + BOOST_CHECK_EQUAL(s231.size(), 3); + BOOST_CHECK_EQUAL(s123.size(), 3); + BOOST_CHECK_EQUAL(s423.size(), 3); +} -BOOST_AUTO_TEST_CASE_TEMPLATE(test_static_strides_ctor, value, test_types) { - using namespace boost::numeric; +BOOST_AUTO_TEST_CASE(test_static_strides_ctor_access_first_order) +{ + namespace ublas = boost::numeric::ublas; - strides_type, ublas::layout::first_order> s1{}; - BOOST_CHECK_EQUAL(s1.empty(), false); - BOOST_CHECK_EQUAL(s1.size(), 2); + constexpr auto s11 = ublas::to_strides_v,first_order>; + constexpr auto s12 = ublas::to_strides_v,first_order>; + constexpr auto s21 = ublas::to_strides_v,first_order>; + constexpr auto s23 = ublas::to_strides_v,first_order>; + constexpr auto s231 = ublas::to_strides_v,first_order>; + constexpr auto s213 = ublas::to_strides_v,first_order>; + constexpr auto s123 = ublas::to_strides_v,first_order>; + constexpr auto s423 = ublas::to_strides_v,first_order>; - strides_type, ublas::layout::first_order> s2{}; - BOOST_CHECK_EQUAL(s2.empty(), false); - BOOST_CHECK_EQUAL(s2.size(), 2); + BOOST_REQUIRE_EQUAL(s11.size(), 2); + BOOST_REQUIRE_EQUAL(s12.size(), 2); + BOOST_REQUIRE_EQUAL(s21.size(), 2); + BOOST_REQUIRE_EQUAL(s23.size(), 2); + BOOST_REQUIRE_EQUAL(s231.size(), 3); + BOOST_REQUIRE_EQUAL(s213.size(), 3); + BOOST_REQUIRE_EQUAL(s123.size(), 3); + BOOST_REQUIRE_EQUAL(s423.size(), 3); - strides_type, ublas::layout::first_order> s3{}; - BOOST_CHECK_EQUAL(s3.empty(), false); - BOOST_CHECK_EQUAL(s3.size(), 2); - strides_type, ublas::layout::first_order> s4{}; - BOOST_CHECK_EQUAL(s4.empty(), false); - BOOST_CHECK_EQUAL(s4.size(), 2); + BOOST_CHECK_EQUAL(s11[0], 1); + BOOST_CHECK_EQUAL(s11[1], 1); - strides_type, ublas::layout::first_order> 
s5{}; - BOOST_CHECK_EQUAL(s5.empty(), false); - BOOST_CHECK_EQUAL(s5.size(), 3); + BOOST_CHECK_EQUAL(s12[0], 1); + BOOST_CHECK_EQUAL(s12[1], 1); - strides_type, ublas::layout::first_order> s6{}; - BOOST_CHECK_EQUAL(s6.empty(), false); - BOOST_CHECK_EQUAL(s6.size(), 3); + BOOST_CHECK_EQUAL(s21[0], 1); + BOOST_CHECK_EQUAL(s21[1], 2); // NOTE: is this the way we want to have it? - strides_type, ublas::layout::first_order> s7{}; - BOOST_CHECK_EQUAL(s7.empty(), false); - BOOST_CHECK_EQUAL(s7.size(), 3); -} + BOOST_CHECK_EQUAL(s23[0], 1); + BOOST_CHECK_EQUAL(s23[1], 2); + + BOOST_CHECK_EQUAL(s231[0], 1); + BOOST_CHECK_EQUAL(s231[1], 2); + BOOST_CHECK_EQUAL(s231[2], 6); + + BOOST_CHECK_EQUAL(s123[0], 1); + BOOST_CHECK_EQUAL(s123[1], 1); + BOOST_CHECK_EQUAL(s123[2], 2); -BOOST_AUTO_TEST_CASE(test_static_strides_ctor_access_first_order) { - using namespace boost::numeric; - - strides_type, ublas::layout::first_order> s1{}; - BOOST_REQUIRE_EQUAL(s1.size(), 2); - BOOST_CHECK_EQUAL(s1[0], 1); - BOOST_CHECK_EQUAL(s1[1], 1); - - strides_type, ublas::layout::first_order> s2{}; - BOOST_REQUIRE_EQUAL(s2.size(), 2); - BOOST_CHECK_EQUAL(s2[0], 1); - BOOST_CHECK_EQUAL(s2[1], 1); - - strides_type, ublas::layout::first_order> s3{}; - BOOST_REQUIRE_EQUAL(s3.size(), 2); - BOOST_CHECK_EQUAL(s3[0], 1); - BOOST_CHECK_EQUAL(s3[1], 1); - - strides_type, ublas::layout::first_order> s4{}; - BOOST_REQUIRE_EQUAL(s4.size(), 2); - BOOST_CHECK_EQUAL(s4[0], 1); - BOOST_CHECK_EQUAL(s4[1], 2); - - strides_type, ublas::layout::first_order> s5{}; - BOOST_REQUIRE_EQUAL(s5.size(), 3); - BOOST_CHECK_EQUAL(s5[0], 1); - BOOST_CHECK_EQUAL(s5[1], 2); - BOOST_CHECK_EQUAL(s5[2], 6); - - strides_type, ublas::layout::first_order> s6{}; - BOOST_REQUIRE_EQUAL(s6.size(), 3); - BOOST_CHECK_EQUAL(s6[0], 1); - BOOST_CHECK_EQUAL(s6[1], 1); - BOOST_CHECK_EQUAL(s6[2], 2); - - strides_type, ublas::layout::first_order> s7{}; - BOOST_REQUIRE_EQUAL(s7.size(), 3); - BOOST_CHECK_EQUAL(s7[0], 1); - BOOST_CHECK_EQUAL(s7[1], 2); - BOOST_CHECK_EQUAL(s7[2], 2); - - strides_type, ublas::layout::first_order> s8{}; - BOOST_REQUIRE_EQUAL(s8.size(), 3); - BOOST_CHECK_EQUAL(s8[0], 1); - BOOST_CHECK_EQUAL(s8[1], 4); - BOOST_CHECK_EQUAL(s8[2], 8); + BOOST_CHECK_EQUAL(s213[0], 1); + BOOST_CHECK_EQUAL(s213[1], 2); + BOOST_CHECK_EQUAL(s213[2], 2); + + BOOST_CHECK_EQUAL(s423[0], 1); + BOOST_CHECK_EQUAL(s423[1], 4); + BOOST_CHECK_EQUAL(s423[2], 8); } -BOOST_AUTO_TEST_CASE(test_static_strides_ctor_access_last_order) { - using namespace boost::numeric; - - strides_type, ublas::layout::last_order> s1{}; - BOOST_REQUIRE_EQUAL(s1.size(), 2); - BOOST_CHECK_EQUAL(s1[0], 1); - BOOST_CHECK_EQUAL(s1[1], 1); - - strides_type, ublas::layout::last_order> s2{}; - BOOST_REQUIRE_EQUAL(s2.size(), 2); - BOOST_CHECK_EQUAL(s2[0], 1); - BOOST_CHECK_EQUAL(s2[1], 1); - - strides_type, ublas::layout::last_order> s3{}; - BOOST_REQUIRE_EQUAL(s3.size(), 2); - BOOST_CHECK_EQUAL(s3[0], 1); - BOOST_CHECK_EQUAL(s3[1], 1); - - strides_type, ublas::layout::last_order> s4{}; - BOOST_REQUIRE_EQUAL(s4.size(), 2); - BOOST_CHECK_EQUAL(s4[0], 3); - BOOST_CHECK_EQUAL(s4[1], 1); - - strides_type, ublas::layout::last_order> s5{}; - BOOST_REQUIRE_EQUAL(s5.size(), 3); - BOOST_CHECK_EQUAL(s5[0], 3); - BOOST_CHECK_EQUAL(s5[1], 1); - BOOST_CHECK_EQUAL(s5[2], 1); - - strides_type, ublas::layout::last_order> s6{}; - BOOST_REQUIRE_EQUAL(s6.size(), 3); - BOOST_CHECK_EQUAL(s6[0], 6); - BOOST_CHECK_EQUAL(s6[1], 3); - BOOST_CHECK_EQUAL(s6[2], 1); - - strides_type, ublas::layout::last_order> s7{}; - 
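// A minimal illustration, not taken from the patch itself, of the stride
// recurrences these constexpr checks encode: with first_order (column-major)
// w[0] = 1 and w[k] = w[k-1] * n[k-1]; with last_order (row-major) w[p-1] = 1
// and w[k] = w[k+1] * n[k+1]. For n = extents<4,2,3> the first_order strides
// are {1,4,8}, matching the s423 checks above; the last_order strides follow
// the same recurrence and come out as {6,3,1}. The to_strides_v spelling is
// taken from the hunks above; the header path is assumed.
#include <boost/numeric/ublas/tensor.hpp>   // header path assumed
namespace strides_sketch {
  namespace ublas = boost::numeric::ublas;
  constexpr auto wf = ublas::to_strides_v<ublas::extents<4,2,3>, ublas::layout::first_order>;
  constexpr auto wl = ublas::to_strides_v<ublas::extents<4,2,3>, ublas::layout::last_order>;
  static_assert(wf[0] == 1 && wf[1] == 4 && wf[2] == 8);
  static_assert(wl[0] == 6 && wl[1] == 3 && wl[2] == 1);
}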
BOOST_REQUIRE_EQUAL(s7.size(), 3); - BOOST_CHECK_EQUAL(s7[0], 3); - BOOST_CHECK_EQUAL(s7[1], 3); - BOOST_CHECK_EQUAL(s7[2], 1); - - strides_type, ublas::layout::last_order> s8{}; - BOOST_REQUIRE_EQUAL(s8.size(), 3); - BOOST_CHECK_EQUAL(s8[0], 6); - BOOST_CHECK_EQUAL(s8[1], 3); - BOOST_CHECK_EQUAL(s8[2], 1); +BOOST_AUTO_TEST_CASE(test_static_strides_ctor_access_last_order) +{ + namespace ublas = boost::numeric::ublas; + + constexpr auto s11 = ublas::to_strides_v,last_order>; + constexpr auto s12 = ublas::to_strides_v,last_order>; + constexpr auto s21 = ublas::to_strides_v,last_order>; + constexpr auto s23 = ublas::to_strides_v,last_order>; + constexpr auto s231 = ublas::to_strides_v,last_order>; + constexpr auto s213 = ublas::to_strides_v,last_order>; + constexpr auto s123 = ublas::to_strides_v,last_order>; + constexpr auto s423 = ublas::to_strides_v,last_order>; + + BOOST_REQUIRE_EQUAL(s11.size(), 2); + BOOST_REQUIRE_EQUAL(s12.size(), 2); + BOOST_REQUIRE_EQUAL(s21.size(), 2); + BOOST_REQUIRE_EQUAL(s23.size(), 2); + BOOST_REQUIRE_EQUAL(s231.size(), 3); + BOOST_REQUIRE_EQUAL(s213.size(), 3); + BOOST_REQUIRE_EQUAL(s123.size(), 3); + BOOST_REQUIRE_EQUAL(s423.size(), 3); + + + BOOST_CHECK_EQUAL(s11[0], 1); + BOOST_CHECK_EQUAL(s11[1], 1); + + BOOST_CHECK_EQUAL(s12[0], 2); //NOTE: is this the way we want the stride to be computed? + BOOST_CHECK_EQUAL(s12[1], 1); + + BOOST_CHECK_EQUAL(s21[0], 1); + BOOST_CHECK_EQUAL(s21[1], 1); + + BOOST_CHECK_EQUAL(s23[0], 3); + BOOST_CHECK_EQUAL(s23[1], 1); + + BOOST_CHECK_EQUAL(s231[0], 3); + BOOST_CHECK_EQUAL(s231[1], 1); + BOOST_CHECK_EQUAL(s231[2], 1); + + BOOST_CHECK_EQUAL(s123[0], 6); + BOOST_CHECK_EQUAL(s123[1], 3); + BOOST_CHECK_EQUAL(s123[2], 1); + + BOOST_CHECK_EQUAL(s213[0], 3); + BOOST_CHECK_EQUAL(s213[1], 3); + BOOST_CHECK_EQUAL(s213[2], 1); } BOOST_AUTO_TEST_SUITE_END() diff --git a/test/tensor/test_static_tensor.cpp b/test/tensor/test_static_tensor.cpp index 1fef8617d..653a2726b 100644 --- a/test/tensor/test_static_tensor.cpp +++ b/test/tensor/test_static_tensor.cpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at @@ -15,51 +15,45 @@ #include #include -//#ifndef BOOST_TEST_DYN_LINK -//#define BOOST_TEST_DYN_LINK -//#endif - -//#define BOOST_TEST_MODULE TestStaticTensor - #include #include "utility.hpp" -BOOST_AUTO_TEST_SUITE ( test_static_tensor ) +BOOST_AUTO_TEST_SUITE ( test_tensor_static ) using test_types = zip>::with_t; BOOST_AUTO_TEST_CASE_TEMPLATE( test_tensor_ctor, value, test_types) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - auto a1 = ublas::static_tensor,layout_type>{}; - BOOST_CHECK_EQUAL( a1.size() , 0ul ); - BOOST_CHECK( a1.empty() ); +// auto a1 = ublas::tensor_static,layout_type>{}; +// BOOST_CHECK_EQUAL( a1.size() , 0ul ); +// BOOST_CHECK( a1.empty() ); - auto a2 = ublas::static_tensor,layout_type>{}; + auto a2 = ublas::tensor_static,layout_type>{}; BOOST_CHECK_EQUAL( a2.size() , 1 ); BOOST_CHECK( !a2.empty() ); - auto a3 = ublas::static_tensor,layout_type>{}; + auto a3 = ublas::tensor_static,layout_type>{}; BOOST_CHECK_EQUAL( a3.size() , 2 ); BOOST_CHECK( !a3.empty() ); - auto a4 = ublas::static_tensor,layout_type>{}; + auto a4 = ublas::tensor_static,layout_type>{}; BOOST_CHECK_EQUAL( a4.size() , 2 ); BOOST_CHECK( !a4.empty() ); - auto a5 = ublas::static_tensor,layout_type>{}; + auto a5 = ublas::tensor_static,layout_type>{}; BOOST_CHECK_EQUAL( a5.size() , 2 ); BOOST_CHECK( !a5.empty() ); - auto a6 = ublas::static_tensor,layout_type>{}; + auto a6 = ublas::tensor_static,layout_type>{}; BOOST_CHECK_EQUAL( a6.size() , 4*3*2 ); BOOST_CHECK( !a6.empty() ); - auto a7 = ublas::static_tensor,layout_type>{}; + auto a7 = ublas::tensor_static,layout_type>{}; BOOST_CHECK_EQUAL( a7.size() , 4*1*2 ); BOOST_CHECK( !a7.empty() ); @@ -69,9 +63,9 @@ BOOST_AUTO_TEST_CASE_TEMPLATE( test_tensor_ctor, value, test_types) struct fixture { template - using extents_type = boost::numeric::ublas::static_extents; + using extents_type = boost::numeric::ublas::extents; - fixture() {} + fixture()=default; std::tuple< extents_type<1,1>, // 1 @@ -85,17 +79,17 @@ struct fixture BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents, value, test_types, fixture ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - for_each_tuple(extents, [](auto const&, auto& e){ + for_each_in_tuple(extents, [](auto const& /*unused*/, auto& e){ using extents_type = std::decay_t; - auto t = ublas::static_tensor{}; + auto t = ublas::tensor_static{}; - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); - if(e.empty()) { + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); + if(ublas::empty(e)) { BOOST_CHECK ( t.empty() ); } else{ @@ -108,14 +102,14 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents, value, test_types, BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_ctor, value, test_types, fixture ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - for_each_tuple(extents, [](auto const&, auto& e){ + for_each_in_tuple(extents, [](auto const& /*unused*/, auto& e){ using extents_type = std::decay_t; - auto r = ublas::static_tensor{0}; + auto r = ublas::tensor_static{0}; auto t = r; BOOST_CHECK_EQUAL ( 
t.size() , r.size() ); @@ -123,7 +117,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_ctor, value, test_types, fix BOOST_CHECK ( t.strides() == r.strides() ); BOOST_CHECK ( t.extents() == r.extents() ); - if(e.empty()) { + if(ublas::empty(e)) { BOOST_CHECK ( t.empty() ); } else{ @@ -139,17 +133,17 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_ctor, value, test_types, fix BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_ctor_layout, value, test_types, fixture ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; using other_layout_type = std::conditional_t::value, ublas::layout::last_order, ublas::layout::first_order>; - for_each_tuple(extents, [](auto const&, auto& e){ + for_each_in_tuple(extents, [](auto const& /*unused*/, auto& e){ using extents_type = std::decay_t; - using tensor_type = ublas::static_tensor; + using tensor_type = ublas::tensor_static; auto r = tensor_type{0}; - ublas::static_tensor t = r; + ublas::tensor_static t = r; tensor_type q = t; BOOST_CHECK_EQUAL ( t.size() , r.size() ); @@ -170,20 +164,20 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_ctor_layout, value, test_typ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_move_ctor, value, test_types, fixture ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - auto check = [](auto const&, auto& e) + auto check = [](auto const& /*unused*/, auto& e) { using extents_type = std::decay_t; - using tensor_type = ublas::static_tensor; + using tensor_type = ublas::tensor_static; auto r = tensor_type{}; auto t = std::move(r); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); - if(e.empty()) { + if(ublas::empty(e)) { BOOST_CHECK ( t.empty() ); } else{ @@ -192,13 +186,13 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_move_ctor, value, test_types }; - for_each_tuple(extents,check); + for_each_in_tuple(extents,check); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents_init, value, test_types, fixture ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; @@ -208,9 +202,9 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents_init, value, test_ty using distribution_type = std::conditional_t, std::uniform_int_distribution<>, std::uniform_real_distribution<> >; auto distribution = distribution_type(1,6); - for_each_tuple(extents, [&](auto const&, auto const& e){ + for_each_in_tuple(extents, [&](auto const& /*unused*/, auto const& e){ using extents_type = std::decay_t; - using tensor_type = ublas::static_tensor; + using tensor_type = ublas::tensor_static; auto r = value_type( static_cast< inner_type_t >(distribution(generator)) ); auto t = tensor_type{r}; @@ -224,23 +218,23 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents_init, value, test_ty BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents_array, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - for_each_tuple(extents, [](auto const&, auto& e){ + 
for_each_in_tuple(extents, [](auto const& /*unused*/, auto& e){ using extents_type = std::decay_t; - using tensor_type = ublas::static_tensor; - using array_type = typename tensor_type::array_type; + using tensor_type = ublas::tensor_static; + using container_type = typename tensor_type::container_type; - auto a = array_type(); + auto a = container_type(); auto v = value_type {}; for(auto& aa : a){ aa = v; v += value_type{1}; } - auto t = tensor_type{a}; + auto t = tensor_type(a); v = value_type{}; for(auto i = 0ul; i < t.size(); ++i, v+=value_type{1}) @@ -253,13 +247,13 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents_array, value, test_t BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_single_index_access, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - for_each_tuple(extents, [](auto const&, auto& e){ + for_each_in_tuple(extents, [](auto const& /*unused*/, auto& e){ using extents_type = std::decay_t; - using tensor_type = ublas::static_tensor; + using tensor_type = ublas::tensor_static; auto t = tensor_type{}; auto v = value_type {}; @@ -278,7 +272,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_single_index_access, va BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_multi_index_access_at, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; @@ -293,7 +287,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_multi_index_access_at, auto check2 = [](const auto& t) { - std::array k; + std::array k = {0,0}; auto r = std::is_same::value ? 1 : 0; auto q = std::is_same::value ? 1 : 0; auto v = value_type{}; @@ -307,7 +301,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_multi_index_access_at, auto check3 = [](const auto& t) { - std::array k; + std::array k= {0,0,0}; using op_type = std::conditional_t, std::minus<>, std::plus<>>; auto r = std::is_same_v ? 2 : 0; auto o = op_type{}; @@ -324,7 +318,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_multi_index_access_at, auto check4 = [](const auto& t) { - std::array k; + std::array k= {0,0,0,0}; using op_type = std::conditional_t, std::minus<>, std::plus<>>; auto r = std::is_same_v ? 
3 : 0; auto o = op_type{}; @@ -341,9 +335,9 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_multi_index_access_at, } }; - auto check = [check1,check2,check3,check4](auto const&, auto const& e) { + auto check = [check1,check2,check3,check4](auto const& /*unused*/, auto const& e) { using extents_type = std::decay_t; - using tensor_type = ublas::static_tensor; + using tensor_type = ublas::tensor_static; auto t = tensor_type{}; auto v = value_type {}; for(auto i = 0ul; i < t.size(); ++i){ @@ -351,25 +345,25 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_multi_index_access_at, v+=value_type{1}; } - if constexpr ( extents_type::_size == 1) check1(t); - else if constexpr ( extents_type::_size == 2) check2(t); - else if constexpr ( extents_type::_size == 3) check3(t); - else if constexpr ( extents_type::_size == 4) check4(t); + if constexpr ( std::tuple_size_v == 1) check1(t); + else if constexpr ( std::tuple_size_v == 2) check2(t); + else if constexpr ( std::tuple_size_v == 3) check3(t); + else if constexpr ( std::tuple_size_v == 4) check4(t); }; - for_each_tuple(extents,check); + for_each_in_tuple(extents,check); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_standard_iterator, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - for_each_tuple(extents,[](auto const&, auto& e){ + for_each_in_tuple(extents,[](auto const& /*unused*/, auto& e){ using extents_type = std::decay_t; - using tensor_type = ublas::static_tensor; + using tensor_type = ublas::tensor_static; auto v = value_type {} + value_type{1}; auto t = tensor_type{v}; @@ -380,7 +374,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_standard_iterator, value, test_ty BOOST_CHECK_EQUAL( std::distance(t.cbegin(), t.cend ()), t.size() ); BOOST_CHECK_EQUAL( std::distance(t.crbegin(), t.crend()), t.size() ); - if(t.size() > 0) { + if(!t.empty()) { BOOST_CHECK( t.data() == std::addressof( *t.begin () ) ) ; BOOST_CHECK( t.data() == std::addressof( *t.cbegin() ) ) ; } @@ -388,17 +382,17 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_standard_iterator, value, test_ty } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_throw, value, test_types, fixture) -{ - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::static_tensor, layout_type>; +//BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_throw, value, test_types, fixture) +//{ +// namespace ublas = boost::numeric::ublas; +// using value_type = typename value::first_type; +// using layout_type = typename value::second_type; +// using tensor_type = ublas::tensor_static, layout_type>; - auto t = tensor_type{}; - auto i = ublas::index::index_type<4>{}; - BOOST_CHECK_THROW((void)t.operator()(i,i,i), std::runtime_error); +// auto t = tensor_type{}; +// auto i = ublas::index::index_type<4>{}; +// BOOST_CHECK_THROW((void)t.operator()(i,i,i), std::runtime_error); -} +//} BOOST_AUTO_TEST_SUITE_END() diff --git a/test/tensor/test_static_tensor_matrix_vector.cpp b/test/tensor/test_static_tensor_matrix_vector.cpp index bce3749ad..22c2ba434 100644 --- a/test/tensor/test_static_tensor_matrix_vector.cpp +++ b/test/tensor/test_static_tensor_matrix_vector.cpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018, Cem 
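// A minimal illustration, not taken from the patch itself: the rank dispatch
// above appears to use std::tuple_size on the static extents type, i.e. the
// number of template arguments of extents<...> is its compile-time rank. This
// assumes the library specializes std::tuple_size for extents; the spelling
// and header path are inferred from the hunk above.
#include <tuple>
#include <boost/numeric/ublas/tensor.hpp>   // header path assumed
namespace rank_sketch {
  namespace ublas = boost::numeric::ublas;
  static_assert(std::tuple_size_v<ublas::extents<4,2,3>>   == 3);
  static_assert(std::tuple_size_v<ublas::extents<1,2,3,4>> == 4);
}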
Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at @@ -21,58 +21,58 @@ -BOOST_AUTO_TEST_SUITE ( test_static_tensor_matrix_interoperability ) ; +BOOST_AUTO_TEST_SUITE ( test_tensor_static_matrix_interoperability ) using test_types = zip::with_t; -BOOST_AUTO_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_ctor, value, test_types) +BOOST_AUTO_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_ctor, pair, test_types) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using matrix_type = typename tensor_type::matrix_type; + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using matrix = typename tensor::matrix_type; - ublas::static_tensor,layout_type> a2 = matrix_type(1,1); + ublas::tensor_static,layout> a2 = matrix(1,1); BOOST_CHECK_EQUAL( a2.size() , 1 ); BOOST_CHECK( !a2.empty() ); - ublas::static_tensor,layout_type> a3 = matrix_type(2,1); + ublas::tensor_static,layout> a3 = matrix(2,1); BOOST_CHECK_EQUAL( a3.size() , 2 ); BOOST_CHECK( !a3.empty() ); - ublas::static_tensor,layout_type> a4 = matrix_type(1,2); + ublas::tensor_static,layout> a4 = matrix(1,2); BOOST_CHECK_EQUAL( a4.size() , 2 ); BOOST_CHECK( !a4.empty() ); - ublas::static_tensor,layout_type> a5 = matrix_type(2,3); + ublas::tensor_static,layout> a5 = matrix(2,3); BOOST_CHECK_EQUAL( a5.size() , 6 ); BOOST_CHECK( !a5.empty() ); } -BOOST_AUTO_TEST_CASE_TEMPLATE( test_tensor_vector_copy_ctor, value, test_types) +BOOST_AUTO_TEST_CASE_TEMPLATE( test_tensor_vector_copy_ctor, pair, test_types) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using vector_type = typename tensor_type::vector_type; + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using vector = typename tensor::vector_type; - ublas::static_tensor,layout_type> a2 = vector_type(1); + ublas::tensor_static,layout> a2 = vector(1); BOOST_CHECK_EQUAL( a2.size() , 1 ); BOOST_CHECK( !a2.empty() ); - ublas::static_tensor,layout_type> a3 = vector_type(2); + ublas::tensor_static,layout> a3 = vector(2); BOOST_CHECK_EQUAL( a3.size() , 2 ); BOOST_CHECK( !a3.empty() ); - ublas::static_tensor,layout_type> a4 = vector_type(2); + ublas::tensor_static,layout> a4 = vector(2); BOOST_CHECK_EQUAL( a4.size() , 2 ); BOOST_CHECK( !a4.empty() ); - ublas::static_tensor,layout_type> a5 = vector_type(3); + ublas::tensor_static,layout> a5 = vector(3); BOOST_CHECK_EQUAL( a5.size() , 3 ); BOOST_CHECK( !a5.empty() ); } @@ -81,96 +81,96 @@ BOOST_AUTO_TEST_CASE_TEMPLATE( test_tensor_vector_copy_ctor, value, test_types) struct fixture { template - using extents_type = boost::numeric::ublas::static_extents; + using shape = boost::numeric::ublas::extents; - fixture() {} + fixture()=default; std::tuple< - extents_type<1,1>, // 0 - extents_type<2,3>, // 1 - extents_type<9,7>, // 2 - extents_type<15,17> // 3 - > extents;; + shape<1,1>, // 0 + shape<2,3>, // 1 + shape<5,8>, // 2 + shape<9,7> // 3 + > extents; }; -BOOST_FIXTURE_TEST_CASE_TEMPLATE( 
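// A minimal illustration, not taken from the patch itself, of the matrix
// interoperability exercised above: a tensor_static whose static extents match
// the matrix dimensions can be copy-constructed (and later copy-assigned) from
// the dynamic tensor's matrix_type. All type spellings here are assumptions
// reconstructed from the hunks; the header path is assumed as well.
#include <cassert>
#include <boost/numeric/ublas/tensor.hpp>   // header path assumed
namespace interop_sketch {
  namespace ublas = boost::numeric::ublas;
  inline void check()
  {
    using layout = ublas::layout::first_order;
    using tensor = ublas::tensor_dynamic<float, layout>;   // dynamic counterpart, as used in the hunk
    using matrix = typename tensor::matrix_type;            // uBLAS matrix with a matching layout
    ublas::tensor_static<float, ublas::extents<2,3>, layout> t = matrix(2,3);
    assert(t.size() == 6);   // 2*3 elements taken over from the matrix
    assert(t.rank() == 2);
  }
}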
test_tensor_matrix_copy_ctor_extents, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_ctor_extents, pair, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using matrix_type = typename tensor_type::matrix_type; - - auto check = [](auto const&, auto& e) { - using extents_type = std::decay_t; - using etensor_type = ublas::static_tensor; - - assert(e.size()==2); - etensor_type t = matrix_type{e[0],e[1]}; - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using matrix = typename tensor::matrix_type; + + auto check = [](auto const& /*unused*/, auto& e) { + using shape = std::decay_t; + using etensor = ublas::tensor_static; + + assert(ublas::size(e)==2); + etensor t = matrix{e[0],e[1]}; + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); BOOST_CHECK ( !t.empty() ); }; - for_each_tuple(extents,check); + for_each_in_tuple(extents,check); } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_copy_ctor_extents, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_copy_ctor_extents, pair, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using vector_type = typename tensor_type::vector_type; - - auto check = [](auto const&, auto& e) { - using extents_type = std::decay_t; - using etensor_type = ublas::static_tensor; - - if constexpr( extents_type::at(1) == 1 ){ - assert(e.size()==2); - if(e.empty()) + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using vector = typename tensor::vector_type; + + auto check = [](auto const& /*unused*/, auto& e) { + using shape = std::decay_t; + using etensor = ublas::tensor_static; + + if constexpr( ublas::get_v == 1 ){ + assert(ublas::size(e)==2); + if(ublas::empty(e)) return; - etensor_type t = vector_type(product(e)); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); + etensor t = vector(ublas::product(e)); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); BOOST_CHECK ( !t.empty() ); } }; - for_each_tuple(extents,check); + for_each_in_tuple(extents,check); } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_assignment, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_assignment, pair, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using matrix_type = typename tensor_type::matrix_type; - - auto check = [](auto const&, auto& e) { - using extents_type = std::decay_t; - using etensor_type = ublas::static_tensor; - - assert(e.size() == 2); - auto t = etensor_type{}; - auto r = matrix_type(e[0],e[1]); + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename 
pair::second_type; + using tensor = ublas::tensor_dynamic; + using matrix = typename tensor::matrix_type; + + auto check = [](auto const& /*unused*/, auto& e) { + using shape = std::decay_t; + using etensor = ublas::tensor_static; + + assert(ublas::size(e) == 2); + auto t = etensor{}; + auto r = matrix(e[0],e[1]); std::iota(r.data().begin(),r.data().end(), 1); t = r; BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) ); BOOST_CHECK_EQUAL ( t.extents().at(1) , e.at(1) ); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); BOOST_CHECK ( !t.empty() ); for(auto j = 0ul; j < t.size(1); ++j){ @@ -180,34 +180,34 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_assignment, value, te } }; - for_each_tuple(extents,check); + for_each_in_tuple(extents,check); } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_copy_assignment, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_copy_assignment, pair, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using vector_type = typename tensor_type::vector_type; + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using vector = typename tensor::vector_type; - auto check = [](auto const&, auto& e) { - using extents_type = std::decay_t; - using etensor_type = ublas::static_tensor; + auto check = [](auto const& /*unused*/, auto& e) { + using shape = std::decay_t; + using etensor = ublas::tensor_static; - assert(e.size() == 2); + assert(ublas::size(e) == 2); - if constexpr( extents_type::at(1) == 1 ){ - auto t = etensor_type{}; - auto r = vector_type(e[0]*e[1]); + if constexpr( ublas::get_v == 1 ){ + auto t = etensor{}; + auto r = vector(e[0]*e[1]); std::iota(r.data().begin(),r.data().end(), 1); t = r; BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0)*e.at(1) ); BOOST_CHECK_EQUAL ( t.extents().at(1) , 1); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); + BOOST_CHECK_EQUAL ( t.size() , ublas::product (e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); BOOST_CHECK ( !t.empty() ); for(auto i = 0ul; i < t.size(); ++i){ @@ -216,32 +216,32 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_copy_assignment, value, te } }; - for_each_tuple(extents,check); + for_each_in_tuple(extents,check); } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_move_assignment, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_move_assignment, pair, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using matrix_type = typename tensor_type::matrix_type; - - auto check = [](auto const&, auto& e) { - using extents_type = std::decay_t; - using etensor_type = ublas::static_tensor; - - assert(e.size() == 2); - auto t = etensor_type{}; - auto r = matrix_type(e[0],e[1]); + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using matrix = typename tensor::matrix_type; + + auto check = 
[](auto const& /*unused*/, auto& e) { + using shape = std::decay_t; + using etensor = ublas::tensor_static; + + assert(ublas::size(e) == 2); + auto t = etensor{}; + auto r = matrix(e[0],e[1]); std::iota(r.data().begin(),r.data().end(), 1); auto q = r; t = std::move(r); BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) ); BOOST_CHECK_EQUAL ( t.extents().at(1) , e.at(1) ); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); BOOST_CHECK ( !t.empty() ); for(auto j = 0ul; j < t.size(1); ++j){ @@ -251,36 +251,36 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_move_assignment, value, te } }; - for_each_tuple(extents,check); + for_each_in_tuple(extents,check); } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_move_assignment, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_move_assignment, pair, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using vector_type = typename tensor_type::vector_type; - - auto check = [](auto const&, auto& e) { - using extents_type = std::decay_t; - using etensor_type = ublas::static_tensor; - - assert(e.size() == 2); - if constexpr( extents_type::at(1) == 1 ){ - auto t = etensor_type{}; - auto r = vector_type(e[0]*e[1]); + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using vector = typename tensor::vector_type; + + auto check = [](auto const& /*unused*/, auto& e) { + using shape = std::decay_t; + using etensor = ublas::tensor_static; + + assert(ublas::size(e) == 2); + if constexpr( ublas::get_v == 1 ){ + auto t = etensor{}; + auto r = vector(e[0]*e[1]); std::iota(r.data().begin(),r.data().end(), 1); auto q = r; t = std::move(r); BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) * e.at(1)); BOOST_CHECK_EQUAL ( t.extents().at(1) , 1); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); BOOST_CHECK ( !t.empty() ); for(auto i = 0ul; i < t.size(); ++i){ @@ -289,50 +289,50 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_move_assignment, value, te } }; - for_each_tuple(extents,check); + for_each_in_tuple(extents,check); } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_expressions, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_expressions, pair, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using matrix_type = typename tensor_type::matrix_type; - - auto check = [](auto const&, auto& e) { - using extents_type = std::decay_t; - using etensor_type = ublas::static_tensor; - - assert(e.size() == 2); - auto t = etensor_type{}; - auto r = matrix_type(e[0],e[1]); + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using matrix = typename tensor::matrix_type; + + for_each_in_tuple(extents,[](auto const& /*unused*/, auto& e) { + using shape = 
std::decay_t; + using etensor = ublas::tensor_static; + + assert(ublas::size(e) == 2); + auto t = etensor{}; + auto r = matrix(e[0],e[1]); std::iota(r.data().begin(),r.data().end(), 1); t = r + 3*r; - etensor_type s = r + 3*r; - etensor_type q = s + r + 3*r + s; // + 3*r + etensor s = r + 3*r; + etensor q = s + r + 3*r + s; // + 3*r BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) ); BOOST_CHECK_EQUAL ( t.extents().at(1) , e.at(1) ); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); BOOST_CHECK ( !t.empty() ); BOOST_CHECK_EQUAL ( s.extents().at(0) , e.at(0) ); BOOST_CHECK_EQUAL ( s.extents().at(1) , e.at(1) ); - BOOST_CHECK_EQUAL ( s.size() , product(e) ); - BOOST_CHECK_EQUAL ( s.rank() , e.size() ); + BOOST_CHECK_EQUAL ( s.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( s.rank() , ublas::size (e) ); BOOST_CHECK ( !s.empty() ); BOOST_CHECK_EQUAL ( q.extents().at(0) , e.at(0) ); BOOST_CHECK_EQUAL ( q.extents().at(1) , e.at(1) ); - BOOST_CHECK_EQUAL ( q.size() , product(e) ); - BOOST_CHECK_EQUAL ( q.rank() , e.size() ); + BOOST_CHECK_EQUAL ( q.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( q.rank() , ublas::size (e) ); BOOST_CHECK ( !q.empty() ); @@ -343,9 +343,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_expressions, value, test_t BOOST_CHECK_EQUAL( q.at(i,j), 3*s.at(i,j) ); } } - }; - - for_each_tuple(extents,check); + }); } @@ -353,44 +351,44 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_expressions, value, test_t -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_expressions, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_expressions, pair, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using vector_type = typename tensor_type::vector_type; - - auto check = [](auto const&, auto& e) { - using extents_type = std::decay_t; - using etensor_type = ublas::static_tensor; - - assert(e.size() == 2); - if constexpr( extents_type::at(1) == 1 ){ - auto t = etensor_type{}; - auto r = vector_type(e[0]*e[1]); - std::iota(r.data().begin(),r.data().end(), 1); + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using vector = typename tensor::vector_type; + + for_each_in_tuple(extents,[](auto const& /*unused*/, auto& e) { + using shape = std::decay_t; + using etensor = ublas::tensor_static; + + assert(ublas::size(e) == 2); + if constexpr( ublas::get_v == 1 ){ + auto t = etensor{}; + auto r = vector(e[0]*e[1]); + std::iota(r.data().begin(),r.data().end(), value{1}); t = r + 3*r; - etensor_type s = r + 3*r; - etensor_type q = s + r + 3*r + s; // + 3*r + etensor s = r + 3*r; + etensor q = s + r + 3*r + s; // + 3*r BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0)*e.at(1) ); BOOST_CHECK_EQUAL ( t.extents().at(1) , 1); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); BOOST_CHECK ( !t.empty() ); BOOST_CHECK_EQUAL ( s.extents().at(0) , e.at(0)*e.at(1) ); BOOST_CHECK_EQUAL ( s.extents().at(1) , 1); - BOOST_CHECK_EQUAL ( s.size() , product(e) ); - BOOST_CHECK_EQUAL ( s.rank() , 
e.size() ); + BOOST_CHECK_EQUAL ( s.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( s.rank() , ublas::size (e) ); BOOST_CHECK ( !s.empty() ); BOOST_CHECK_EQUAL ( q.extents().at(0) , e.at(0)*e.at(1) ); BOOST_CHECK_EQUAL ( q.extents().at(1) , 1); - BOOST_CHECK_EQUAL ( q.size() , product(e) ); - BOOST_CHECK_EQUAL ( q.rank() , e.size() ); + BOOST_CHECK_EQUAL ( q.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( q.rank() , ublas::size (e) ); BOOST_CHECK ( !q.empty() ); @@ -401,36 +399,35 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_expressions, value, test_t BOOST_CHECK_EQUAL( q.at(i), 3*s.at(i) ); } } - }; - - for_each_tuple(extents,check); + }); } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_vector_expressions, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_vector_expressions, pair, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using matrix_type = typename tensor_type::matrix_type; - using vector_type = typename tensor_type::vector_type; + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using matrix = typename tensor::matrix_type; + using vector = typename tensor::vector_type; - auto check = [](auto const&, auto& e) { - using extents_type = std::decay_t; + for_each_in_tuple(extents,[](auto const& /*unused*/, auto& e) { + using shape = std::decay_t; - if(product(e) <= 2) + if constexpr(ublas::product_v <= 2) return; - assert(e.size() == 2); - auto Q = ublas::static_tensor,layout_type>{} ; - auto A = matrix_type(e[0],e[1]); - auto b = vector_type(e[1]); - auto c = vector_type(e[0]); - std::iota(b.data().begin(),b.data().end(), 1); - std::fill(A.data().begin(),A.data().end(), 1); - std::fill(c.data().begin(),c.data().end(), 2); + assert(ublas::size_v == 2); + + auto Q = ublas::tensor_static,1>,layout>{} ; + auto A = matrix(e[0],e[1]); + auto b = vector(e[1]); + auto c = vector(e[0]); + std::iota(b.data().begin(),b.data().end(), value{1}); + std::fill(A.data().begin(),A.data().end(), value{1}); + std::fill(c.data().begin(),c.data().end(), value{2}); std::fill(Q.begin(),Q.end(), 2); decltype(Q) T = Q + (ublas::prod(A , b) + 2*c) + 3*Q; @@ -442,17 +439,15 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_vector_expressions, value, BOOST_CHECK_EQUAL ( T.rank() , Q.rank() ); BOOST_CHECK ( !T.empty() ); - for(auto i = 0ul; i < T.size(); ++i){ - auto n = e[1]; - auto ab = n * (n+1) / 2; - BOOST_CHECK_EQUAL( T(i), ab+4*Q(0)+2*c(0) ); - } - - }; + auto n = e[1]; + auto ab = value(std::div(n*(n+1),2).quot); + const auto ref = ab+4*Q(0)+2*c(0); + BOOST_CHECK( std::all_of(T.begin(),T.end(), [ref](auto cc){ return ref == cc; }) ); +// BOOST_CHECK_EQUAL( T(i), ab+4*Q(0)+2*c(0) ); - for_each_tuple(extents,check); + }); } diff --git a/test/tensor/test_strides.cpp b/test/tensor/test_strides.cpp index 25d292223..71ef94256 100644 --- a/test/tensor/test_strides.cpp +++ b/test/tensor/test_strides.cpp @@ -12,157 +12,149 @@ #include -#include -#include - -//BOOST_AUTO_TEST_SUITE(test_strides, * boost::unit_test::depends_on("test_extents")); +#include BOOST_AUTO_TEST_SUITE(test_strides) using test_types = std::tuple; +using extents = boost::numeric::ublas::extents<>; +using first_order = boost::numeric::ublas::layout::first_order; +using last_order = 
boost::numeric::ublas::layout::last_order; + + BOOST_AUTO_TEST_CASE_TEMPLATE( test_strides_ctor, value, test_types) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; + constexpr auto layout = value{}; + + auto s1 = ublas::to_strides(extents {1},layout); + auto s5 = ublas::to_strides(extents {5},layout); + auto s11 = ublas::to_strides(extents {1,1},layout); + auto s12 = ublas::to_strides(extents {1,2},layout); + auto s21 = ublas::to_strides(extents {2,1},layout); + auto s23 = ublas::to_strides(extents {2,3},layout); + auto s231 = ublas::to_strides(extents{2,3,1},layout); + auto s123 = ublas::to_strides(extents{1,2,3},layout); + auto s423 = ublas::to_strides(extents{4,2,3},layout); + + BOOST_CHECK (! s1.empty()); + BOOST_CHECK (! s5.empty()); + BOOST_CHECK (! s11.empty()); + BOOST_CHECK (! s12.empty()); + BOOST_CHECK (! s21.empty()); + BOOST_CHECK (! s23.empty()); + BOOST_CHECK (!s231.empty()); + BOOST_CHECK (!s123.empty()); + BOOST_CHECK (!s423.empty()); + + BOOST_CHECK_EQUAL ( s1.size(), 1); + BOOST_CHECK_EQUAL ( s5.size(), 1); + BOOST_CHECK_EQUAL ( s11.size(), 2); + BOOST_CHECK_EQUAL ( s12.size(), 2); + BOOST_CHECK_EQUAL ( s21.size(), 2); + BOOST_CHECK_EQUAL ( s23.size(), 2); + BOOST_CHECK_EQUAL ( s231.size(), 3); + BOOST_CHECK_EQUAL ( s123.size(), 3); + BOOST_CHECK_EQUAL ( s423.size(), 3); +} - using extents_type = ublas::basic_extents; - using strides_type = ublas::strides_t; +BOOST_AUTO_TEST_CASE( test_strides_ctor_access_first_order) +{ + namespace ublas = boost::numeric::ublas; + constexpr auto layout = first_order{}; - strides_type s0{}; - BOOST_CHECK ( s0.empty()); - BOOST_CHECK_EQUAL ( s0.size(), 0); + auto s1 = ublas::to_strides(extents {1},layout); + auto s5 = ublas::to_strides(extents {5},layout); + auto s11 = ublas::to_strides(extents {1,1},layout); + auto s12 = ublas::to_strides(extents {1,2},layout); + auto s21 = ublas::to_strides(extents {2,1},layout); + auto s23 = ublas::to_strides(extents {2,3},layout); + auto s231 = ublas::to_strides(extents{2,3,1},layout); + auto s123 = ublas::to_strides(extents{1,2,3},layout); + auto s423 = ublas::to_strides(extents{4,2,3},layout); - strides_type s1{extents_type{1,1}}; - BOOST_CHECK (!s1.empty()); - BOOST_CHECK_EQUAL ( s1.size(), 2); + BOOST_REQUIRE_EQUAL ( s11 .size(),2); + BOOST_REQUIRE_EQUAL ( s12 .size(),2); + BOOST_REQUIRE_EQUAL ( s21 .size(),2); + BOOST_REQUIRE_EQUAL ( s23 .size(),2); + BOOST_REQUIRE_EQUAL ( s231.size(),3); + BOOST_REQUIRE_EQUAL ( s123.size(),3); + BOOST_REQUIRE_EQUAL ( s423.size(),3); - strides_type s2{extents_type{1,2}}; - BOOST_CHECK (!s2.empty()); - BOOST_CHECK_EQUAL ( s2.size(), 2); - strides_type s3{extents_type{2,1}}; - BOOST_CHECK (!s3.empty()); - BOOST_CHECK_EQUAL ( s3.size(), 2); + BOOST_CHECK_EQUAL ( s11[0], 1); + BOOST_CHECK_EQUAL ( s11[1], 1); - strides_type s4{extents_type{2,3}}; - BOOST_CHECK (!s4.empty()); - BOOST_CHECK_EQUAL ( s4.size(), 2); + BOOST_CHECK_EQUAL ( s12[0], 1); + BOOST_CHECK_EQUAL ( s12[1], 1); - strides_type s5{extents_type{2,3,1}}; - BOOST_CHECK (!s5.empty()); - BOOST_CHECK_EQUAL ( s5.size(), 3); + BOOST_CHECK_EQUAL ( s21[0], 1); + BOOST_CHECK_EQUAL ( s21[1], 1); - strides_type s6{extents_type{1,2,3}}; - BOOST_CHECK (!s6.empty()); - BOOST_CHECK_EQUAL ( s6.size(), 3); - strides_type s7{extents_type{4,2,3}}; - BOOST_CHECK (!s7.empty()); - BOOST_CHECK_EQUAL ( s7.size(), 3); -} + BOOST_CHECK_EQUAL ( s23[0], 1); + BOOST_CHECK_EQUAL ( s23[1], 2); -BOOST_AUTO_TEST_CASE( test_strides_ctor_access_first_order) -{ - using namespace boost::numeric; - - using 
extents_type = ublas::basic_extents; - using strides_type = ublas::strides_t; - - strides_type s1{extents_type{1,1}}; - BOOST_REQUIRE_EQUAL( s1.size(),2); - BOOST_CHECK_EQUAL ( s1[0], 1); - BOOST_CHECK_EQUAL ( s1[1], 1); - - strides_type s2{extents_type{1,2}}; - BOOST_REQUIRE_EQUAL ( s2.size(),2); - BOOST_CHECK_EQUAL ( s2[0], 1); - BOOST_CHECK_EQUAL ( s2[1], 1); - - strides_type s3{extents_type{2,1}}; - BOOST_REQUIRE_EQUAL ( s3.size(),2); - BOOST_CHECK_EQUAL ( s3[0], 1); - BOOST_CHECK_EQUAL ( s3[1], 1); - - strides_type s4{extents_type{2,3}}; - BOOST_REQUIRE_EQUAL ( s4.size(),2); - BOOST_CHECK_EQUAL ( s4[0], 1); - BOOST_CHECK_EQUAL ( s4[1], 2); - - strides_type s5{extents_type{2,3,1}}; - BOOST_REQUIRE_EQUAL ( s5.size(),3); - BOOST_CHECK_EQUAL ( s5[0], 1); - BOOST_CHECK_EQUAL ( s5[1], 2); - BOOST_CHECK_EQUAL ( s5[2], 6); - - strides_type s6{extents_type{1,2,3}}; - BOOST_REQUIRE_EQUAL ( s6.size(),3); - BOOST_CHECK_EQUAL ( s6[0], 1); - BOOST_CHECK_EQUAL ( s6[1], 1); - BOOST_CHECK_EQUAL ( s6[2], 2); - - strides_type s7{extents_type{2,1,3}}; - BOOST_REQUIRE_EQUAL ( s7.size(),3); - BOOST_CHECK_EQUAL ( s7[0], 1); - BOOST_CHECK_EQUAL ( s7[1], 2); - BOOST_CHECK_EQUAL ( s7[2], 2); - - strides_type s8{extents_type{4,2,3}}; - BOOST_REQUIRE_EQUAL ( s8.size(),3); - BOOST_CHECK_EQUAL ( s8[0], 1); - BOOST_CHECK_EQUAL ( s8[1], 4); - BOOST_CHECK_EQUAL ( s8[2], 8); + BOOST_CHECK_EQUAL ( s231[0], 1); + BOOST_CHECK_EQUAL ( s231[1], 2); + BOOST_CHECK_EQUAL ( s231[2], 6); + + BOOST_CHECK_EQUAL ( s123[0], 1); + BOOST_CHECK_EQUAL ( s123[1], 1); + BOOST_CHECK_EQUAL ( s123[2], 2); + + BOOST_CHECK_EQUAL ( s423[0], 1); + BOOST_CHECK_EQUAL ( s423[1], 4); + BOOST_CHECK_EQUAL ( s423[2], 8); } BOOST_AUTO_TEST_CASE( test_strides_ctor_access_last_order) { - using namespace boost::numeric; - - using extents_type = ublas::basic_extents; - using strides_type = ublas::strides_t; - - strides_type s1{extents_type{1,1}}; - BOOST_REQUIRE_EQUAL( s1.size(),2); - BOOST_CHECK_EQUAL ( s1[0], 1); - BOOST_CHECK_EQUAL ( s1[1], 1); - - strides_type s2{extents_type{1,2}}; - BOOST_REQUIRE_EQUAL ( s2.size(),2); - BOOST_CHECK_EQUAL ( s2[0], 1); - BOOST_CHECK_EQUAL ( s2[1], 1); - - strides_type s3{extents_type{2,1}}; - BOOST_REQUIRE_EQUAL ( s3.size(),2); - BOOST_CHECK_EQUAL ( s3[0], 1); - BOOST_CHECK_EQUAL ( s3[1], 1); - - strides_type s4{extents_type{2,3}}; - BOOST_REQUIRE_EQUAL ( s4.size(),2); - BOOST_CHECK_EQUAL ( s4[0], 3); - BOOST_CHECK_EQUAL ( s4[1], 1); - - strides_type s5{extents_type{2,3,1}}; - BOOST_REQUIRE_EQUAL ( s5.size(),3); - BOOST_CHECK_EQUAL ( s5[0], 3); - BOOST_CHECK_EQUAL ( s5[1], 1); - BOOST_CHECK_EQUAL ( s5[2], 1); - - strides_type s6{extents_type{1,2,3}}; - BOOST_REQUIRE_EQUAL ( s6.size(),3); - BOOST_CHECK_EQUAL ( s6[0], 6); - BOOST_CHECK_EQUAL ( s6[1], 3); - BOOST_CHECK_EQUAL ( s6[2], 1); - - strides_type s7{extents_type{2,1,3}}; - BOOST_REQUIRE_EQUAL ( s7.size(),3); - BOOST_CHECK_EQUAL ( s7[0], 3); - BOOST_CHECK_EQUAL ( s7[1], 3); - BOOST_CHECK_EQUAL ( s7[2], 1); - - strides_type s8{extents_type{4,2,3}}; - BOOST_REQUIRE_EQUAL ( s8.size(),3); - BOOST_CHECK_EQUAL ( s8[0], 6); - BOOST_CHECK_EQUAL ( s8[1], 3); - BOOST_CHECK_EQUAL ( s8[2], 1); + namespace ublas = boost::numeric::ublas; + constexpr auto layout = last_order{}; + + auto s1 = ublas::to_strides(extents {1},layout); + auto s5 = ublas::to_strides(extents {5},layout); + auto s11 = ublas::to_strides(extents {1,1},layout); + auto s12 = ublas::to_strides(extents {1,2},layout); + auto s21 = ublas::to_strides(extents {2,1},layout); + auto s23 = ublas::to_strides(extents 
{2,3},layout); + auto s231 = ublas::to_strides(extents{2,3,1},layout); + auto s123 = ublas::to_strides(extents{1,2,3},layout); + auto s423 = ublas::to_strides(extents{4,2,3},layout); + + BOOST_REQUIRE_EQUAL ( s11 .size(),2); + BOOST_REQUIRE_EQUAL ( s12 .size(),2); + BOOST_REQUIRE_EQUAL ( s21 .size(),2); + BOOST_REQUIRE_EQUAL ( s23 .size(),2); + BOOST_REQUIRE_EQUAL ( s231.size(),3); + BOOST_REQUIRE_EQUAL ( s123.size(),3); + BOOST_REQUIRE_EQUAL ( s423.size(),3); + + BOOST_CHECK_EQUAL ( s11[0], 1); + BOOST_CHECK_EQUAL ( s11[1], 1); + + BOOST_CHECK_EQUAL ( s12[0], 1); + BOOST_CHECK_EQUAL ( s12[1], 1); + + BOOST_CHECK_EQUAL ( s21[0], 1); + BOOST_CHECK_EQUAL ( s21[1], 1); + + BOOST_CHECK_EQUAL ( s23[0], 3); + BOOST_CHECK_EQUAL ( s23[1], 1); + + BOOST_CHECK_EQUAL ( s231[0], 3); + BOOST_CHECK_EQUAL ( s231[1], 1); + BOOST_CHECK_EQUAL ( s231[2], 1); + + BOOST_CHECK_EQUAL ( s123[0], 6); + BOOST_CHECK_EQUAL ( s123[1], 3); + BOOST_CHECK_EQUAL ( s123[2], 1); + + BOOST_CHECK_EQUAL ( s423[0], 6); + BOOST_CHECK_EQUAL ( s423[1], 3); + BOOST_CHECK_EQUAL ( s423[2], 1); } BOOST_AUTO_TEST_SUITE_END() diff --git a/test/tensor/test_tensor.cpp b/test/tensor/test_tensor.cpp index 4ded6b1a0..ce16c8916 100644 --- a/test/tensor/test_tensor.cpp +++ b/test/tensor/test_tensor.cpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at @@ -18,14 +18,13 @@ #ifndef BOOST_TEST_DYN_LINK #define BOOST_TEST_DYN_LINK #endif - +// NOLINTNEXTLINE #define BOOST_TEST_MODULE Tensor #include #include "utility.hpp" -// BOOST_AUTO_TEST_SUITE ( test_tensor, * boost::unit_test::depends_on("test_extents") ) ; BOOST_AUTO_TEST_SUITE ( test_tensor ) using test_types = zip>::with_t; @@ -33,15 +32,15 @@ using test_types = zip>::with_t; + using tensor_type = ublas::tensor_dynamic; - auto a1 = tensor_type{}; - BOOST_CHECK_EQUAL( a1.size() , 0ul ); - BOOST_CHECK( a1.empty() ); - BOOST_CHECK_EQUAL( a1.data() , nullptr); +// auto a1 = tensor_type{}; +// BOOST_CHECK_EQUAL( a1.size() , 0ul ); +// BOOST_CHECK( a1.empty() ); +// BOOST_CHECK_EQUAL( a1.data() , nullptr); auto a2 = tensor_type{1,1}; BOOST_CHECK_EQUAL( a2.size() , 1 ); @@ -82,7 +81,6 @@ struct fixture using extents_type = boost::numeric::ublas::extents<>; fixture() : extents { - extents_type{}, // 0 extents_type{1,1}, // 1 extents_type{1,2}, // 2 extents_type{2,1}, // 3 @@ -100,16 +98,16 @@ struct fixture BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents, value, test_types, fixture ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; auto check = [](auto const& e) { auto t = tensor_type{e}; - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); - if(e.empty()) { + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size(e) ); + if(ublas::empty(e)) { BOOST_CHECK ( t.empty() ); BOOST_CHECK_EQUAL ( t.data() , nullptr); } @@ -126,10 +124,10 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents, value, test_types, BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_ctor, 
value, test_types, fixture ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; auto check = [](auto const& e) { @@ -140,7 +138,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_ctor, value, test_types, fix BOOST_CHECK ( t.strides() == r.strides() ); BOOST_CHECK ( t.extents() == r.extents() ); - if(e.empty()) { + if(ublas::empty(e)) { BOOST_CHECK ( t.empty() ); BOOST_CHECK_EQUAL ( t.data() , nullptr); } @@ -160,12 +158,12 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_ctor, value, test_types, fix BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_ctor_layout, value, test_types, fixture ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; using other_layout_type = std::conditional_t::value, ublas::layout::last_order, ublas::layout::first_order>; - using other_tensor_type = ublas::dynamic_tensor; + using other_tensor_type = ublas::tensor_dynamic; for(auto const& e : extents) @@ -191,19 +189,19 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_ctor_layout, value, test_typ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_move_ctor, value, test_types, fixture ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; auto check = [](auto const& e) { auto r = tensor_type{e}; auto t = std::move(r); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); - if(e.empty()) { + if(ublas::empty(e)) { BOOST_CHECK ( t.empty() ); BOOST_CHECK_EQUAL ( t.data() , nullptr); } @@ -221,10 +219,10 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_move_ctor, value, test_types BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents_init, value, test_types, fixture ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; std::random_device device{}; std::minstd_rand0 generator(device()); @@ -244,14 +242,14 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents_init, value, test_ty BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents_array, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using array_type = typename tensor_type::array_type; + using tensor_type = ublas::tensor_dynamic; + using container_type = typename tensor_type::container_type; for(auto const& e : extents) { - auto a = array_type(product(e)); + auto a = container_type(product(e)); auto v = value_type {}; for(auto& aa : a){ @@ -270,10 +268,10 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents_array, value, test_t 
BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_single_index_access, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; for(auto const& e : extents) { auto t = tensor_type{e}; @@ -292,10 +290,10 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_single_index_access, va BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_multi_index_access_at, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; auto check1 = [](const tensor_type& t) { auto v = value_type{}; @@ -307,7 +305,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_multi_index_access_at, auto check2 = [](const tensor_type& t) { - std::array k; + std::array k = {0,0}; auto r = std::is_same::value ? 1 : 0; auto q = std::is_same::value ? 1 : 0; auto v = value_type{}; @@ -321,7 +319,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_multi_index_access_at, auto check3 = [](const tensor_type& t) { - std::array k; + std::array k = {0,0,0}; using op_type = std::conditional_t, std::minus<>, std::plus<>>; auto r = std::is_same_v ? 2 : 0; auto o = op_type{}; @@ -338,7 +336,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_multi_index_access_at, auto check4 = [](const tensor_type& t) { - std::array k; + std::array k = {0,0,0,0}; using op_type = std::conditional_t, std::minus<>, std::plus<>>; auto r = std::is_same_v ? 
3 : 0; auto o = op_type{}; @@ -379,10 +377,10 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_multi_index_access_at, BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_reshape, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; for(auto const& efrom : extents){ for(auto const& eto : extents){ @@ -393,17 +391,17 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_reshape, value, test_types, fixtu for(auto i = 0ul; i < t.size(); ++i) BOOST_CHECK_EQUAL( t[i], v ); - t.reshape(eto); - for(auto i = 0ul; i < std::min(product(efrom),product(eto)); ++i) - BOOST_CHECK_EQUAL( t[i], v ); + auto r = reshape(t,eto); + for(auto i = 0ul; i < std::min(ublas::product(efrom),ublas::product(eto)); ++i) + BOOST_CHECK_EQUAL( r[i], v ); - BOOST_CHECK_EQUAL ( t.size() , product(eto) ); - BOOST_CHECK_EQUAL ( t.rank() , eto.size() ); - BOOST_CHECK ( t.extents() == eto ); + BOOST_CHECK_EQUAL ( r.size() , ublas::product(eto) ); + BOOST_CHECK_EQUAL ( r.rank() , ublas::size (eto) ); + BOOST_CHECK ( r.extents() == eto ); if(efrom != eto){ for(auto i = product(efrom); i < t.size(); ++i) - BOOST_CHECK_EQUAL( t[i], value_type{} ); + BOOST_CHECK_EQUAL( r[i], value_type{} ); } } } @@ -414,10 +412,10 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_reshape, value, test_types, fixtu BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_swap, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; for(auto const& e_t : extents){ for(auto const& e_r : extents) { @@ -431,15 +429,15 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_swap, value, test_types, fixture) for(auto i = 0ul; i < t.size(); ++i) BOOST_CHECK_EQUAL( t[i], w ); - BOOST_CHECK_EQUAL ( t.size() , product(e_r) ); - BOOST_CHECK_EQUAL ( t.rank() , e_r.size() ); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e_r) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e_r) ); BOOST_CHECK ( t.extents() == e_r ); for(auto i = 0ul; i < r.size(); ++i) BOOST_CHECK_EQUAL( r[i], v ); - BOOST_CHECK_EQUAL ( r.size() , product(e_t) ); - BOOST_CHECK_EQUAL ( r.rank() , e_t.size() ); + BOOST_CHECK_EQUAL ( r.size() , ublas::product(e_t) ); + BOOST_CHECK_EQUAL ( r.rank() , ublas::size (e_t) ); BOOST_CHECK ( r.extents() == e_t ); @@ -451,10 +449,10 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_swap, value, test_types, fixture) BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_standard_iterator, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; for(auto const& e : extents) { @@ -467,7 +465,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_standard_iterator, value, test_ty BOOST_CHECK_EQUAL( std::distance(t.cbegin(), t.cend ()), t.size() ); BOOST_CHECK_EQUAL( std::distance(t.crbegin(), t.crend()), t.size() ); - if(t.size() > 0) { + if(!t.empty()) { BOOST_CHECK( t.data() == std::addressof( *t.begin () ) ) ; BOOST_CHECK( t.data() == std::addressof( *t.cbegin() ) ) ; } @@ -476,17 +474,17 @@ 
BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_standard_iterator, value, test_ty BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_throw, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; - std::vector vec(30); - BOOST_CHECK_THROW(tensor_type({5,5},vec), std::runtime_error); + std::vector vec(2); + BOOST_CHECK_THROW(tensor_type({5,5},vec), std::invalid_argument); auto t = tensor_type{{5,5}}; auto i = ublas::index::index_type<4>{}; - BOOST_CHECK_THROW((void)t.operator()(i,i,i), std::runtime_error); + BOOST_CHECK_THROW((void)t.operator()(i,i,i), std::invalid_argument); } diff --git a/test/tensor/test_tensor_matrix_vector.cpp b/test/tensor/test_tensor_matrix_vector.cpp index 7672bb31a..bea8566a1 100644 --- a/test/tensor/test_tensor_matrix_vector.cpp +++ b/test/tensor/test_tensor_matrix_vector.cpp @@ -19,67 +19,65 @@ #include "utility.hpp" -// BOOST_AUTO_TEST_SUITE ( test_tensor_matrix_interoperability, * boost::unit_test::depends_on("test_tensor") ) ; - BOOST_AUTO_TEST_SUITE ( test_tensor_matrix_interoperability ) using test_types = zip::with_t; -BOOST_AUTO_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_ctor, value, test_types) +BOOST_AUTO_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_ctor, pair, test_types) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using matrix_type = typename tensor_type::matrix_type; + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using matrix = typename tensor::matrix_type; - tensor_type a2 = matrix_type(1,1); + tensor a2 = matrix(1,1); BOOST_CHECK_EQUAL( a2.size() , 1 ); BOOST_CHECK( !a2.empty() ); BOOST_CHECK_NE( a2.data() , nullptr); - tensor_type a3 = matrix_type(2,1); + tensor a3 = matrix(2,1); BOOST_CHECK_EQUAL( a3.size() , 2 ); BOOST_CHECK( !a3.empty() ); BOOST_CHECK_NE( a3.data() , nullptr); - tensor_type a4 = matrix_type(1,2); + tensor a4 = matrix(1,2); BOOST_CHECK_EQUAL( a4.size() , 2 ); BOOST_CHECK( !a4.empty() ); BOOST_CHECK_NE( a4.data() , nullptr); - tensor_type a5 = matrix_type(2,3); + tensor a5 = matrix(2,3); BOOST_CHECK_EQUAL( a5.size() , 6 ); BOOST_CHECK( !a5.empty() ); BOOST_CHECK_NE( a5.data() , nullptr); } -BOOST_AUTO_TEST_CASE_TEMPLATE( test_tensor_vector_copy_ctor, value, test_types) +BOOST_AUTO_TEST_CASE_TEMPLATE( test_tensor_vector_copy_ctor, pair, test_types) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using vector_type = typename tensor_type::vector_type; + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using vector = typename tensor::vector_type; - tensor_type a2 = vector_type(1); + tensor a2 = vector(1); BOOST_CHECK_EQUAL( a2.size() , 1 ); BOOST_CHECK( !a2.empty() ); BOOST_CHECK_NE( a2.data() , nullptr); - tensor_type a3 = vector_type(2); + tensor a3 = vector(2); BOOST_CHECK_EQUAL( a3.size() , 2 ); BOOST_CHECK( !a3.empty() ); BOOST_CHECK_NE( a3.data() , nullptr); - tensor_type a4 = 
vector_type(2); + tensor a4 = vector(2); BOOST_CHECK_EQUAL( a4.size() , 2 ); BOOST_CHECK( !a4.empty() ); BOOST_CHECK_NE( a4.data() , nullptr); - tensor_type a5 = vector_type(3); + tensor a5 = vector(3); BOOST_CHECK_EQUAL( a5.size() , 3 ); BOOST_CHECK( !a5.empty() ); BOOST_CHECK_NE( a5.data() , nullptr); @@ -88,34 +86,35 @@ BOOST_AUTO_TEST_CASE_TEMPLATE( test_tensor_vector_copy_ctor, value, test_types) struct fixture { - using extents_type = boost::numeric::ublas::extents<>; - fixture() - : extents{ - extents_type{1,1}, // 1 - extents_type{2,3}, // 2 - extents_type{9,11}, // 2 - extents_type{15,17}} // 3 - { - } - std::vector extents; + using extents_type = boost::numeric::ublas::extents<>; + fixture() + : extents{ + extents_type{1,1}, // 1 + extents_type{2,3}, // 2 + extents_type{5,6}, // 3 + extents_type{9,7}} // 4 + { + } + + std::vector extents; }; -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_ctor_extents, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_ctor_extents, pair, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using matrix_type = typename tensor_type::matrix_type; + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using matrix = typename tensor::matrix_type; auto check = [](auto const& e) { - assert(e.size()==2); - tensor_type t = matrix_type{e[0],e[1]}; - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); + assert(ublas::size(e)==2); + tensor t = matrix{e[0],e[1]}; + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); BOOST_CHECK ( !t.empty() ); BOOST_CHECK_NE ( t.data() , nullptr); }; @@ -125,22 +124,22 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_ctor_extents, value, } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_copy_ctor_extents, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_copy_ctor_extents, pair, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using vector_type = typename tensor_type::vector_type; + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using vector = typename tensor::vector_type; auto check = [](auto const& e) { - assert(e.size()==2); - if(e.empty()) + assert(ublas::size(e)==2); + if(ublas::empty(e)) return; - tensor_type t = vector_type(product(e)); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); + tensor t = vector(ublas::product(e)); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); BOOST_CHECK ( !t.empty() ); BOOST_CHECK_NE ( t.data() , nullptr); }; @@ -151,26 +150,27 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_copy_ctor_extents, value, -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_assignment, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_assignment, pair, test_types, fixture ) { - using namespace boost::numeric; - using value_type = 
typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using matrix_type = typename tensor_type::matrix_type; + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using matrix = typename tensor::matrix_type; + auto check = [](auto const& e) { - assert(e.size() == 2); - auto t = tensor_type{}; - auto r = matrix_type(e[0],e[1]); + assert(ublas::size(e) == 2); + auto t = tensor(e); + auto r = matrix(e[0],e[1]); std::iota(r.data().begin(),r.data().end(), 1); t = r; BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) ); BOOST_CHECK_EQUAL ( t.extents().at(1) , e.at(1) ); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); BOOST_CHECK ( !t.empty() ); BOOST_CHECK_NE ( t.data() , nullptr); @@ -186,26 +186,26 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_assignment, value, te } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_copy_assignment, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_copy_assignment, pair, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using vector_type = typename tensor_type::vector_type; + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using vector = typename tensor::vector_type; auto check = [](auto const& e) { - assert(e.size() == 2); - auto t = tensor_type{}; - auto r = vector_type(e[0]*e[1]); - std::iota(r.data().begin(),r.data().end(), 1); + assert(ublas::size(e) == 2); + auto t = tensor(e); + auto r = vector(e[0]*e[1]); + std::iota(r.data().begin(),r.data().end(), value{1}); t = r; BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0)*e.at(1) ); BOOST_CHECK_EQUAL ( t.extents().at(1) , 1); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); BOOST_CHECK ( !t.empty() ); BOOST_CHECK_NE ( t.data() , nullptr); @@ -218,27 +218,27 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_copy_assignment, value, te check(e); } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_move_assignment, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_move_assignment, pair, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using matrix_type = typename tensor_type::matrix_type; + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using matrix = typename tensor::matrix_type; auto check = [](auto const& e) { - assert(e.size() == 2); - auto t = tensor_type{}; - auto r = matrix_type(e[0],e[1]); - std::iota(r.data().begin(),r.data().end(), 1); + assert(ublas::size(e) == 2); + auto t = tensor(e); + auto r = matrix(e[0],e[1]); + std::iota(r.data().begin(),r.data().end(), value{1}); auto q = r; t = 
std::move(r); BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) ); BOOST_CHECK_EQUAL ( t.extents().at(1) , e.at(1) ); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); BOOST_CHECK ( !t.empty() ); BOOST_CHECK_NE ( t.data() , nullptr); @@ -256,27 +256,27 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_move_assignment, value, te -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_move_assignment, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_move_assignment, pair, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using vector_type = typename tensor_type::vector_type; + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using vector = typename tensor::vector_type; auto check = [](auto const& e) { - assert(e.size() == 2); - auto t = tensor_type{}; - auto r = vector_type(e[0]*e[1]); - std::iota(r.data().begin(),r.data().end(), 1); + assert(ublas::size(e) == 2); + auto t = tensor(e); + auto r = vector(e[0]*e[1]); + std::iota(r.data().begin(),r.data().end(), value{1}); auto q = r; t = std::move(r); BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) * e.at(1)); BOOST_CHECK_EQUAL ( t.extents().at(1) , 1); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); BOOST_CHECK ( !t.empty() ); BOOST_CHECK_NE ( t.data() , nullptr); @@ -293,43 +293,43 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_move_assignment, value, te -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_expressions, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_expressions, pair, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using matrix_type = typename tensor_type::matrix_type; + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using matrix = typename tensor::matrix_type; auto check = [](auto const& e) { - assert(e.size() == 2); - auto t = tensor_type{}; - auto r = matrix_type(e[0],e[1]); - std::iota(r.data().begin(),r.data().end(), 1); + assert(ublas::size(e) == 2); + auto t = tensor(e); + auto r = matrix(e[0],e[1]); + std::iota(r.data().begin(),r.data().end(), value{1}); t = r + 3*r; - tensor_type s = r + 3*r; - tensor_type q = s + r + 3*r + s; // + 3*r + tensor s = r + 3*r; + tensor q = s + r + 3*r + s; // + 3*r BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) ); BOOST_CHECK_EQUAL ( t.extents().at(1) , e.at(1) ); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); BOOST_CHECK ( !t.empty() ); BOOST_CHECK_NE ( t.data() , nullptr); BOOST_CHECK_EQUAL ( s.extents().at(0) , e.at(0) ); BOOST_CHECK_EQUAL ( s.extents().at(1) , e.at(1) ); - BOOST_CHECK_EQUAL ( s.size() , product(e) 
); - BOOST_CHECK_EQUAL ( s.rank() , e.size() ); + BOOST_CHECK_EQUAL ( s.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( s.rank() , ublas::size (e) ); BOOST_CHECK ( !s.empty() ); BOOST_CHECK_NE ( s.data() , nullptr); BOOST_CHECK_EQUAL ( q.extents().at(0) , e.at(0) ); BOOST_CHECK_EQUAL ( q.extents().at(1) , e.at(1) ); - BOOST_CHECK_EQUAL ( q.size() , product(e) ); - BOOST_CHECK_EQUAL ( q.rank() , e.size() ); + BOOST_CHECK_EQUAL ( q.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( q.rank() , ublas::size (e) ); BOOST_CHECK ( !q.empty() ); BOOST_CHECK_NE ( q.data() , nullptr); @@ -352,43 +352,43 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_expressions, value, test_t -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_expressions, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_expressions, pair, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using vector_type = typename tensor_type::vector_type; + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using vector = typename tensor::vector_type; auto check = [](auto const& e) { - assert(e.size() == 2); - auto t = tensor_type{}; - auto r = vector_type(e[0]*e[1]); - std::iota(r.data().begin(),r.data().end(), 1); + assert(ublas::size(e) == 2); + auto t = tensor(e); + auto r = vector(e[0]*e[1]); + std::iota(r.data().begin(),r.data().end(), value{1}); t = r + 3*r; - tensor_type s = r + 3*r; - tensor_type q = s + r + 3*r + s; // + 3*r + tensor s = r + 3*r; + tensor q = s + r + 3*r + s; // + 3*r BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0)*e.at(1) ); BOOST_CHECK_EQUAL ( t.extents().at(1) , 1); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size(e) ); BOOST_CHECK ( !t.empty() ); BOOST_CHECK_NE ( t.data() , nullptr); BOOST_CHECK_EQUAL ( s.extents().at(0) , e.at(0)*e.at(1) ); BOOST_CHECK_EQUAL ( s.extents().at(1) , 1); - BOOST_CHECK_EQUAL ( s.size() , product(e) ); - BOOST_CHECK_EQUAL ( s.rank() , e.size() ); + BOOST_CHECK_EQUAL ( s.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( s.rank() , ublas::size(e) ); BOOST_CHECK ( !s.empty() ); BOOST_CHECK_NE ( s.data() , nullptr); BOOST_CHECK_EQUAL ( q.extents().at(0) , e.at(0)*e.at(1) ); BOOST_CHECK_EQUAL ( q.extents().at(1) , 1); - BOOST_CHECK_EQUAL ( q.size() , product(e) ); - BOOST_CHECK_EQUAL ( q.rank() , e.size() ); + BOOST_CHECK_EQUAL ( q.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( q.rank() , ublas::size(e) ); BOOST_CHECK ( !q.empty() ); BOOST_CHECK_NE ( q.data() , nullptr); @@ -407,30 +407,31 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_expressions, value, test_t -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_vector_expressions, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_vector_expressions, pair, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using matrix_type = typename tensor_type::matrix_type; - using vector_type = typename tensor_type::vector_type; + namespace ublas = boost::numeric::ublas; + using value = typename 
pair::first_type;
+  using layout = typename pair::second_type;
+  using tensor = ublas::tensor_dynamic;
+  using matrix = typename tensor::matrix_type;
+  using vector = typename tensor::vector_type;
   auto check = [](auto const& e) {
     if(product(e) <= 2)
       return;
-    assert(e.size() == 2);
-    auto Q = tensor_type{e[0],1};
-    auto A = matrix_type(e[0],e[1]);
-    auto b = vector_type(e[1]);
-    auto c = vector_type(e[0]);
-    std::iota(b.data().begin(),b.data().end(), 1);
-    std::fill(A.data().begin(),A.data().end(), 1);
-    std::fill(c.data().begin(),c.data().end(), 2);
-    std::fill(Q.begin(),Q.end(), 2);
-
-    tensor_type T = Q + (ublas::prod(A , b) + 2*c) + 3*Q;
+
+    assert(ublas::size(e) == 2);
+    auto Q = tensor{e[0],1};
+    auto A = matrix(e[0],e[1]);
+    auto b = vector(e[1]);
+    auto c = vector(e[0]);
+    std::iota(b.data().begin(),b.data().end(), value{1});
+    std::fill(A.data().begin(),A.data().end(), value{1});
+    std::fill(c.data().begin(),c.data().end(), value{2});
+    std::fill(Q.begin(),Q.end(), value{2});
+
+    tensor T = Q + (ublas::prod(A , b) + 2*c) + 3*Q;
     BOOST_CHECK_EQUAL ( T.extents().at(0) , Q.extents().at(0) );
     BOOST_CHECK_EQUAL ( T.extents().at(1) , Q.extents().at(1));
@@ -440,11 +441,12 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_vector_expressions, value,
     BOOST_CHECK ( !T.empty() );
     BOOST_CHECK_NE ( T.data() , nullptr);
-    for(auto i = 0ul; i < T.size(); ++i){
-      auto n = e[1];
-      auto ab = n * (n+1) / 2;
-      BOOST_CHECK_EQUAL( T(i), ab+4*Q(0)+2*c(0) );
-    }
+    const auto n = e[1];
+    const auto ab = value(std::div(n*(n+1),2).quot);
+    const auto ref = ab+4*Q(0)+2*c(0);
+    BOOST_CHECK( std::all_of(T.begin(),T.end(), [ref](auto cc){ return ref == cc; }) );
+
+//  BOOST_CHECK_EQUAL( T(i), ab+4*Q(0)+2*c(0) );
   };

diff --git a/test/tensor/utility.hpp b/test/tensor/utility.hpp
index 6dc5f1a45..93571be2a 100644
--- a/test/tensor/utility.hpp
+++ b/test/tensor/utility.hpp
@@ -14,6 +14,8 @@
 #define _BOOST_UBLAS_TEST_TENSOR_UTILITY_

 #include
+#include
+#include

 template
 struct zip_helper;
@@ -48,55 +50,38 @@ struct zip_helper, type1, types1...>
 template
 using zip = zip_helper,types...>;

-template
-struct for_each_tuple_impl{
-  static_assert(sizeof...(Ts) > I, "Static Assert in boost::numeric::ublas::detail::for_each_tuple");
-  auto operator()(std::tuple& t, CallBack call_back)
-  {
-    call_back(I,std::get(t));
-    if constexpr(sizeof...(Ts) - 1 > I){
-      for_each_tuple_impl it;
-      it(t,call_back);
-    }
-  }
-};
-
-template
-auto for_each_tuple(std::tuple& t, CallBack call_back){
-  if constexpr (std::tuple_size_v> == 0u )
-    return;
-  for_each_tuple_impl<0,CallBack,Ts...> f;
-  f(t,call_back);
+template
+void for_each_in_tuple(std::tuple const& tuple, UnaryOp&& op)
+{
+  auto invoke_op_for_tuple = [&](std::index_sequence) {
+    (..., std::invoke(op, Is, std::get(tuple)));
+  };
+  invoke_op_for_tuple(std::make_index_sequence>>{});
 }
+namespace boost::numeric::ublas
+{
-template
-struct list{
-  static constexpr size_t size = sizeof...(Ts);
-};
+template
+void for_each_in_index(std::index_sequence, TA const& a, TB const& b, UnaryOp&& op)
+{
+  (..., std::invoke(op,a,b,std::index_sequence{}) );
+}
-template
-struct for_each_list_impl{
-  constexpr decltype(auto) operator()(list l, CallBack call_back){
-    using new_list = list;
-    using value_type = T;
-    call_back(I,value_type{});
-
-    if constexpr(new_list::size != 0){
-      for_each_list_impl it;
-      it(new_list{},call_back);
-    }
-  }
-};
+}// namespace boost::numeric::ublas
+//template
+//void for_each_in_tuple(std::index_sequence, UnaryOp&& op)
+//{
+//  auto invoke_op_for_tuple = [&](std::index_sequence) {
+//    (..., std::invoke(op, Is, Is));
+//  };
+
+//  invoke_op_for_tuple(std::make_index_sequence::size()>{});
+//}
-template
-auto for_each_list(list l, CallBack call_back){
-  for_each_list_impl<0,CallBack,Ts...> f;
-  f(l,call_back);
-}
 #include

From d8aa27b9a4a3a5f651e38363e1dc26cfa0a2d117 Mon Sep 17 00:00:00 2001
From: isaac868 <33269071+isaac868@users.noreply.github.com>
Date: Tue, 17 Aug 2021 16:22:04 -0700
Subject: [PATCH 2/2] Fixed parameter ordering

---
 include/boost/numeric/ublas/opencl/elementwise.hpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/boost/numeric/ublas/opencl/elementwise.hpp b/include/boost/numeric/ublas/opencl/elementwise.hpp
index 72da6a0de..eb6165366 100644
--- a/include/boost/numeric/ublas/opencl/elementwise.hpp
+++ b/include/boost/numeric/ublas/opencl/elementwise.hpp
@@ -229,7 +229,7 @@ void element_sub(ublas::matrix const &a,
 		 ublas::matrix &result,
 		 compute::command_queue& queue)
 {
-  element_wise(a, b, compute::minus(), result, queue);
+  element_wise(a, b, result, compute::minus(), queue);
 }

 template