diff --git a/btas/generic/converge_class.h b/btas/generic/converge_class.h
index 84d38e63..031755cb 100644
--- a/btas/generic/converge_class.h
+++ b/btas/generic/converge_class.h
@@ -4,6 +4,9 @@
 #include
 #include
 #include
+#include <iostream>
+#include <iomanip>
+#include <cmath>
 #include

 namespace btas {
@@ -23,7 +26,7 @@ namespace btas {
    public:
     /// constructor for the base convergence test object
     /// \param[in] tol tolerance for ALS convergence
-    explicit NormCheck(double tol = 1e-3) : tol_(tol) {
+    explicit NormCheck(double tol = 1e-3) : tol_(tol), iter_(0) {
     }

     ~NormCheck() = default;
@@ -50,17 +53,34 @@ namespace btas {
         prev[r] = btas_factors[r];
       }

+      if (verbose_) {
+        std::cout << rank_ << "\t" << iter_ << "\t" << std::setprecision(16) << diff << std::endl;
+      }
       if (diff < this->tol_) {
         return true;
       }
+      ++iter_;

       return false;
     }

+    /// Option to print the change in the factor matrices in the () operator call
+    /// \param[in] verb bool which turns printing off/on.
+    void verbose(bool verb) {
+      verbose_ = verb;
+    }
+
+    double get_fit(bool hit_max_iters = false){
+      // NormCheck does not track a fit; return 0 so callers always get a
+      // defined value.
+      return 0.0;
+    }
+
    private:
     double tol_;
     std::vector<Tensor> prev;  // Set of previous factor matrices
     size_t ndim;               // Number of factor matrices
     ind_t rank_;               // Rank of the CP problem
+    bool verbose_ = false;
+    size_t iter_;
   };

   /**
@@ -105,7 +125,7 @@ namespace btas {
       }

       double normFactors = norm(btas_factors, V);
-      double normResidual = sqrt(abs(normT_ * normT_ + normFactors * normFactors - 2 * abs(iprod)));
+      double normResidual = sqrt(abs(normT_ * normT_ + normFactors - 2 * abs(iprod)));
       double fit = 1. - (normResidual / normT_);
       double fitChange = abs(fitOld_ - fit);

@@ -208,11 +228,11 @@ namespace btas {
         }
       }

-      dtype nrm = 0.0;
+      RT nrm = 0.0;
       for (auto &i : coeffMat) {
-        nrm += i;
+        nrm += std::real(i);
       }
-      return sqrt(abs(nrm));
+      return nrm;
     }
   };

@@ -421,5 +441,315 @@ namespace btas {
       return sqrt(abs(nrm));
     }
   };
+
+  /**
+    \brief Class used to decide when the ALS problem is converged.
+    The fit is not computed; the optimization simply runs until the maximum
+    number of ALS iterations is reached.
+  **/
+  template <typename Tensor>
+  class NoCheck {
+    using ind_t = typename Tensor::range_type::index_type::value_type;
+    using ord_t = typename range_traits<typename Tensor::range_type>::ordinal_type;
+
+   public:
+    /// constructor for the base convergence test object
+    /// \param[in] tol unused; kept so NoCheck is interchangeable with the
+    /// other convergence tests
+    explicit NoCheck(double tol = 1e-3) : iter_(0){
+    }
+
+    ~NoCheck() = default;
+
+    /// Always reports not converged, so ALS runs until the maximum number
+    /// of iterations is reached.
+    /// \param[in] btas_factors Current set of factor matrices
+    bool operator () (const std::vector<Tensor> &btas_factors,
+                      const std::vector<Tensor> &V = std::vector<Tensor>()){
+      auto rank_ = btas_factors[1].extent(1);
+      if (verbose_) {
+        std::cout << rank_ << "\t" << iter_ << std::endl;
+      }
+      ++iter_;
+
+      return false;
+    }
+
+    /// Option to print the rank and iteration count in the () operator call
+    /// \param[in] verb bool which turns printing off/on.
+    void verbose(bool verb) {
+      verbose_ = verb;
+    }
+
+    double get_fit(bool hit_max_iters = false){
+      // NoCheck does not compute a fit; return 0 as a defined placeholder.
+      return 0.0;
+    }
+
+   private:
+    double tol_;
+    bool verbose_ = false;
+    size_t iter_;
+    Tensor prevT_;
+  };
+
+  /// Compares the current CP approximation to the previous one,
+  /// skipping the total fit and directly computing the relative change
+  /// between successive approximations.
+  template <typename Tensor>
+  class ApproxFitCheck{
+    using ind_t = typename Tensor::range_type::index_type::value_type;
+    using ord_t = typename range_traits<typename Tensor::range_type>::ordinal_type;
+
+   public:
+    /// constructor for the base convergence test object
+    /// \param[in] tol tolerance for ALS convergence
+    explicit ApproxFitCheck(double tol = 1e-3) : iter_(0), tol_(tol){
+    }
+
+    ~ApproxFitCheck() = default;
+
+    /// Function to check convergence of the ALS problem
+    /// convergence when \f$ \frac{\| \hat{T}^{i} - \hat{T}^{i+1} \|}{\| \hat{T}^{i} \|} \leq \epsilon \f$
+    /// where \f$ \hat{T}^{i} \f$ is the CP approximation at iteration \f$ i \f$
+    /// \param[in] btas_factors Current set of factor matrices
+
+    bool operator () (std::vector<Tensor> &btas_factors,
+                      const std::vector<Tensor> &V = std::vector<Tensor>()) {
+      auto rank_ = btas_factors[1].extent(1);
+
+      auto fit = 0.0;
+      if(iter_ == 0) {
+        fit_prev_ = (norm(btas_factors, btas_factors, rank_));
+        norm_prev_ = sqrt(fit_prev_);
+        prev_factors = btas_factors;
+//        diff = reconstruct(btas_factors, orders);
+        if (verbose_) {
+          std::cout << rank_ << "\t" << iter_ << "\t" << 1.0 << std::endl;
+        }
+        ++iter_;
+        return false;
+      }
+
+      auto curr_norm = norm(btas_factors, btas_factors, rank_);
+      // \|\hat{T}^{i} - \hat{T}^{i+1}\|^2 = \|\hat{T}^{i}\|^2
+      //   - 2 \langle \hat{T}^{i}, \hat{T}^{i+1} \rangle + \|\hat{T}^{i+1}\|^2
+      fit = sqrt(fit_prev_ - 2 * norm(prev_factors, btas_factors, rank_) + curr_norm) / norm_prev_;
+//      fit = norm(diff);
+//      diff = tnew;
+      fit_prev_ = curr_norm;
+      norm_prev_ = sqrt(curr_norm);
+      prev_factors = btas_factors;
+
+      if (verbose_) {
+        std::cout << rank_ << "\t" << iter_ << "\t" << fit << std::endl;
+      }
+      ++iter_;
+      if (fit < tol_) {
+        ++converged_num;
+        if(converged_num > 1) {
+          iter_ = 0;
+          return true;
+        }
+      }
+      return false;
+    }
+
+    void verbose(bool verb){
+      verbose_ = verb;
+    }
+
+   private:
+    double tol_;
+    bool verbose_ = false;
+    double fit_prev_, norm_prev_;
+    std::vector<size_t> orders;
+    std::vector<Tensor> prev_factors;
+//    Tensor diff;
+    size_t converged_num = 0;
+    size_t iter_;
+
+    /// Frobenius norm of \c a
+    double norm(Tensor& a){
+      auto n = 0.0;
+      for(auto & i : a)
+        n += i * i;
+      return sqrt(n);
+    }
+
+    /// Inner product \f$ \langle \hat{T}_1, \hat{T}_2 \rangle \f$ of two CP
+    /// approximations computed from their factor matrices; the last entry
+    /// of each vector holds the scaling vector lambda.
+    double norm(std::vector<Tensor> &facs1, std::vector<Tensor> &facs2, ind_t rank_){
+      BTAS_ASSERT(facs1.size() == facs2.size());
+      ind_t num_factors = facs1.size();
+      Tensor RRp;
+      Tensor T1 = facs1[0], T2 = facs2[0];
+      auto lam_ptr1 = facs1[num_factors - 1].data(),
+           lam_ptr2 = facs2[num_factors - 1].data();
+      // Scale the columns of the first (local copies of the) factors by lambda
+      for (ind_t i = 0; i < rank_; i++) {
+        scal(T1.extent(0), *(lam_ptr1 + i), std::begin(T1) + i, rank_);
+        scal(T2.extent(0), *(lam_ptr2 + i), std::begin(T2) + i, rank_);
+      }
+
+      contract(1.0, T1, {1,2}, T2, {1,3}, 0.0, RRp, {2,3});
+
+      // Undo the column scaling
+      for (ind_t i = 0; i < rank_; i++) {
+        auto val1 = *(lam_ptr1 + i),
+             val2 = *(lam_ptr2 + i);
+        scal(T1.extent(0), (abs(val1) > 1e-12 ? 1.0/val1 : 1.0), std::begin(T1) + i, rank_);
+        scal(T2.extent(0), (abs(val2) > 1e-12 ? 1.0/val2 : 1.0), std::begin(T2) + i, rank_);
+      }
+
+      // Hadamard product over the remaining middle factor matrices
+      // (indices 1 .. num_factors - 3; the loop bound below covers them all)
+      auto * ptr_RRp = RRp.data();
+      for (ind_t i = 1; i < num_factors - 2; ++i) {
+        Tensor temp;
+        contract(1.0, facs1[i], {1,2}, facs2[i], {1,3}, 0.0, temp, {2,3});
+        auto * ptr_temp = temp.data();
+        for(ord_t r = 0; r < rank_ * rank_; ++r)
+          *(ptr_RRp + r) *= *(ptr_temp + r);
+      }
+      Tensor temp;
+      auto last = num_factors - 2;
+      contract(1.0, facs1[last], {1,2}, facs2[last], {1,3}, 0.0, temp, {2,3});
+      return btas::dot(RRp, temp);
+    }
+
+  };
+
+  /**
+    \brief Class that computes the difference of two successive fits:
+    \f$ \| T - \hat{T}^{i} \|^2 - \| T - \hat{T}^{i+1} \|^2 =
+    \|\hat{T}^{i}\|^2 - 2 \langle T, \hat{T}^{i} \rangle
+    + 2 \langle T, \hat{T}^{i+1} \rangle - \|\hat{T}^{i+1}\|^2 \f$
+  **/
+  template <typename Tensor>
+  class DiffFitCheck{
+    using ind_t = typename Tensor::range_type::index_type::value_type;
+    using ord_t = typename range_traits<typename Tensor::range_type>::ordinal_type;
+    using dtype = typename Tensor::value_type;
+
+   public:
+    /// constructor for the base convergence test object
+    /// \param[in] tol tolerance for ALS convergence
+    explicit DiffFitCheck(double tol = 1e-3) : iter_(0), tol_(tol){
+    }
+
+    ~DiffFitCheck() = default;
+
+    /// Function to check convergence of the ALS problem
+    /// convergence when \f$ \sqrt{ | \, \|T - \hat{T}^{i}\|^2 - \|T - \hat{T}^{i+1}\|^2 \, | } \leq \epsilon \f$
+    /// (the constant \f$ \|T\|^2 \f$ term cancels and is never computed);
+    /// requires set_MtKRP to be refreshed each iteration.
+    /// \param[in] btas_factors Current set of factor matrices
+
+    bool operator () (std::vector<Tensor> &btas_factors,
+                      const std::vector<Tensor> &V = std::vector<Tensor>()) {
+      auto rank_ = btas_factors[1].extent(1);
+      auto n = btas_factors.size() - 1;
+      auto & lambda = btas_factors[n];
+      auto fit = 0.0;
+      if(iter_ == 0) {
+        fit_prev_ = sqrt(abs(norm(V, lambda, rank_) - 2.0 * abs(compute_inner_product(btas_factors[n - 1], lambda))));
+        if (verbose_) {
+          std::cout << rank_ << "\t" << iter_ << "\t" << 1.0 << std::endl;
+        }
+        ++iter_;
+        return false;
+      }
+
+      auto curr_norm = sqrt(abs(norm(V, lambda, rank_) - 2.0 * abs(compute_inner_product(btas_factors[n - 1], lambda))));
+      fit = sqrt(abs(fit_prev_ * fit_prev_ - curr_norm * curr_norm));
+      fit_prev_ = curr_norm;
+
+      if (verbose_) {
+        std::cout << rank_ << "\t" << iter_ << "\t" << fit << std::endl;
+      }
+      ++iter_;
+      if (fit < tol_) {
+        ++converged_num;
+        if(converged_num > 1) {
+          return true;
+        }
+      }
+      return false;
+    }
+
+    void verbose(bool verb){
+      verbose_ = verb;
+    }
+
+    /// Set the matricized-tensor-times-Khatri-Rao-product intermediate
+    /// used to compute \f$ \langle T, \hat{T} \rangle \f$
+    void set_MtKRP(Tensor & MtKRP){
+      MtKRP_ = MtKRP;
+    }
+
+   private:
+    double tol_;
+    bool verbose_ = false;
+    double fit_prev_;
+    Tensor MtKRP_;
+    size_t converged_num = 0;
+    size_t iter_;
+
+    /// Computes \f$ \langle T, \hat{T} \rangle \f$ from the MtKRP intermediate
+    dtype compute_inner_product(Tensor &last_factor, Tensor& lambda){
+      ord_t size = last_factor.size();
+      ind_t rank = last_factor.extent(1);
+      auto *ptr_A = last_factor.data();
+      auto *ptr_MtKRP = MtKRP_.data();
+      auto lam_ptr = lambda.data();
+      dtype iprod = 0.0;
+      for (ord_t i = 0; i < size; ++i) {
+        iprod += *(ptr_MtKRP + i) * btas::impl::conj(*(ptr_A + i)) * *(lam_ptr + i % rank);
+      }
+      return iprod;
+    }
+
+    /// Computes \f$ \|\hat{T}\|^2 \f$ from the gram matrices in \c V
+    double norm(const std::vector<Tensor> &V, Tensor & lambda, ind_t rank_) {
+      auto n = V.size();
+      Tensor coeffMat;
+      typename Tensor::value_type one = 1.0;
+      ger(one, lambda.conj(), lambda, coeffMat);
+
+      auto rank2 = rank_ * (ord_t)rank_;
+      Tensor temp(rank_, rank_);
+
+      auto *ptr_coeff = coeffMat.data();
+      for (size_t i = 0; i < n; ++i) {
+        auto *ptr_V = V[i].data();
+        for (ord_t j = 0; j < rank2; ++j) {
+          *(ptr_coeff + j) *= *(ptr_V + j);
+        }
+      }
+
+      double nrm = 0.0;
+      for (auto &i : coeffMat) {
+        nrm += std::real(i);
+      }
+      return nrm;
+    }
+
+    /// Inner product of two CP approximations from their factor matrices
+    double norm(std::vector<Tensor> &facs1, std::vector<Tensor> &facs2, ind_t rank_){
+      BTAS_ASSERT(facs1.size() == facs2.size());
+      ind_t num_factors = facs1.size();
+      Tensor RRp;
+      Tensor T1 = facs1[0], T2 = facs2[0];
+      auto lam_ptr1 = facs1[num_factors - 1].data(),
+           lam_ptr2 = facs2[num_factors - 1].data();
+      for (ind_t i = 0; i < rank_; i++) {
+        scal(T1.extent(0), *(lam_ptr1 + i), std::begin(T1) + i, rank_);
+        scal(T2.extent(0), *(lam_ptr2 + i), std::begin(T2) + i, rank_);
+      }
+
+      contract(1.0, T1, {1,2}, T2, {1,3}, 0.0, RRp, {2,3});
+
+      for (ind_t i = 0; i < rank_; i++) {
+        auto val1 = *(lam_ptr1 + i),
+             val2 = *(lam_ptr2 + i);
+        scal(T1.extent(0), (abs(val1) > 1e-12 ? 1.0/val1 : 1.0), std::begin(T1) + i, rank_);
+        scal(T2.extent(0), (abs(val2) > 1e-12 ? 1.0/val2 : 1.0), std::begin(T2) + i, rank_);
+      }
+
+      // Hadamard product over the remaining middle factor matrices
+      auto * ptr_RRp = RRp.data();
+      for (ind_t i = 1; i < num_factors - 2; ++i) {
+        Tensor temp;
+        contract(1.0, facs1[i], {1,2}, facs2[i], {1,3}, 0.0, temp, {2,3});
+        auto * ptr_temp = temp.data();
+        for(ord_t r = 0; r < rank_ * rank_; ++r)
+          *(ptr_RRp + r) *= *(ptr_temp + r);
+      }
+      Tensor temp;
+      auto last = num_factors - 2;
+      contract(1.0, facs1[last], {1,2}, facs2[last], {1,3}, 0.0, temp, {2,3});
+      return btas::dot(RRp, temp);
+    }
+
+  };
 } //namespace btas
 #endif  // BTAS_GENERIC_CONV_BASE_CLASS
diff --git a/btas/generic/cp.h b/btas/generic/cp.h
index 384b6e08..e8044717 100644
--- a/btas/generic/cp.h
+++ b/btas/generic/cp.h
@@ -50,11 +50,15 @@ namespace btas {
       t.set_MtKRPR(tensor);
     }

+    template <typename Tensor>
+    void set_MtKRP(DiffFitCheck<Tensor> &t, Tensor &tensor){
+      t.set_MtKRP(tensor);
+    }
     // Functions that can get the fit \|X - \hat{X}\|_F where
     // \hat{X} is the CP approximation (epsilon), if
     // converge_class object isn't FitCheck do nothing
     template <typename T>
-    void get_fit(T &t, double &epsilon) {
+    void get_fit(T &t, double &epsilon, bool max_iter = false) {
       // epsilon = epsilon;
       epsilon = -1;
       return;
@@ -516,6 +520,16 @@ namespace btas {
       return left_side_product;
     }

+    /// Create a \c row by \c col factor matrix filled with
+    /// random numbers drawn from the distribution \c dist
+    /// using the generator \c gen
+    template <typename Generator, typename Distribution>
+    Tensor make_random_factor(ind_t row, ind_t col, Generator gen, Distribution dist){
+      Tensor T(row, col);
+      fill_random(T, gen, dist);
+      return T;
+    }
+
     /// \param[in] factor Which factor matrix to normalize, returns
     /// the \c factor factor matrix with all columns normalized.
     /// \return The column norms of the \c factor factor matrix
diff --git a/btas/generic/cp_als.h b/btas/generic/cp_als.h
index 39698e2d..e7e5b6a6 100644
--- a/btas/generic/cp_als.h
+++ b/btas/generic/cp_als.h
@@ -531,52 +531,52 @@ namespace btas {
                       double &epsilon, bool &fast_pI) override {
       boost::random::mt19937 generator(random_seed_accessor());
       boost::random::uniform_real_distribution<> distribution(-1.0, 1.0);
-      if(A.empty()) {
-        for (size_t i = 0; i < this->ndim; ++i) {
-          // If this mode is symmetric to a previous mode, set it equal to
-          // previous mode, else make a random matrix.
-          auto tmp = symmetries[i];
-          if (tmp != i) {
-            A.push_back(A[tmp]);
-          } else {
-            Tensor a(tensor_ref.extent(i), rank);
-            for (auto iter = a.begin(); iter != a.end(); ++iter) {
-              *(iter) = distribution(generator);
+      if(!factors_set) {
+        if (A.empty()) {
+          for (size_t i = 0; i < this->ndim; ++i) {
+            // If this mode is symmetric to a previous mode, set it equal to
+            // previous mode, else make a random matrix.
+            auto tmp = symmetries[i];
+            if (tmp != i) {
+              A.push_back(A[tmp]);
+            } else {
+              Tensor a(tensor_ref.extent(i), rank);
+              for (auto iter = a.begin(); iter != a.end(); ++iter) {
+                *(iter) = distribution(generator);
+              }
+              this->A.push_back(a);
+              this->normCol(i);
             }
-            this->A.push_back(a);
-            this->normCol(i);
           }
-        }
-      } else{
-        for (size_t i = 0; i < this->ndim; ++i) {
-          // If this mode is symmetric to a previous mode, set it equal to
-          // previous mode, else make a random matrix.
-          auto tmp = symmetries[i];
-          if (tmp != i) {
-            A.push_back(A[tmp]);
-          } else {
-            ind_t col_dim = tensor_ref.extent(i);
-            Tensor a(col_dim, rank);
-            for (auto iter = a.begin(); iter != a.end(); ++iter) {
-              *(iter) = distribution(generator);
+        } else {
+          for (size_t i = 0; i < this->ndim; ++i) {
+            // If this mode is symmetric to a previous mode, set it equal to
+            // previous mode, else make a random matrix.
+            auto tmp = symmetries[i];
+            if (tmp != i) {
+              A.push_back(A[tmp]);
+            } else {
+              ind_t col_dim = tensor_ref.extent(i);
+              Tensor a(col_dim, rank);
+              for (auto iter = a.begin(); iter != a.end(); ++iter) {
+                *(iter) = distribution(generator);
+              }
+              auto &a_prev = A[i];
+              ind_t prev_rank = a_prev.extent(1), smaller_rank = (prev_rank < rank ? prev_rank : rank);
+              auto lo_bound = {0l, 0l}, up_bound = {col_dim, smaller_rank};
+              auto view = make_view(a.range().slice(lo_bound, up_bound), a.storage());
+              std::copy(view.begin(), view.end(), a_prev.begin());
+
+              a_prev = a;
+              this->normCol(i);
             }
-            auto & a_prev = A[i];
-            ind_t prev_rank = a_prev.extent(1),
-                  smaller_rank = (prev_rank < rank ? prev_rank : rank);
-            auto lo_bound = {0l, 0l},
-                 up_bound = {col_dim, smaller_rank};
-            auto view = make_view(a.range().slice(lo_bound, up_bound), a.storage());
-            std::copy(view.begin(), view.end(), a_prev.begin());
-
-            a_prev = a;
-            this->normCol(i);
           }
+          A.pop_back();
         }
-        A.pop_back();
+        Tensor lambda(rank);
+        lambda.fill(0.0);
+        this->A.push_back(lambda);
       }
-      Tensor lambda(rank);
-      lambda.fill(0.0);
-      this->A.push_back(lambda);
       ALS(rank, converge_test, direct, max_als, calculate_epsilon, epsilon, fast_pI);
     }
@@ -633,7 +633,7 @@ namespace btas {
         contract(this->one, ai, {1, 2}, ai.conj(), {1, 3}, this->zero, AtA[i], {2, 3});
       }
       is_converged = converge_test(A, AtA);
-    }while (count < max_als && !is_converged);
+    } while (count < max_als && !is_converged);

     detail::get_fit(converge_test, epsilon, (this->num_ALS == max_als));
     epsilon = 1.0 - epsilon;
diff --git a/btas/generic/cp_df_als.h b/btas/generic/cp_df_als.h
index d6549ab3..d287d06f 100644
--- a/btas/generic/cp_df_als.h
+++ b/btas/generic/cp_df_als.h
@@ -382,16 +382,31 @@ namespace btas {
     std::tuple<std::vector<Tensor>, std::vector<Tensor>> get_init_factors(){
       return std::make_tuple(init_factors_left, init_factors_right);
     }
+
+    void set_init_factors(std::vector<Tensor> init){
+      factors_set = true;
+      this->A = init;
+    }
+
+    void use_old_factors(){
+      factors_set = true;
+    }
+
+    std::vector<Tensor> get_close_to_end_factors(){
+      return close_to_end_factors;
+    }

    protected:
     Tensor &tensor_ref_left;   // Left connected tensor
     Tensor &tensor_ref_right;  // Right connected tensor
     size_t ndimL;              // Number of dimensions in left tensor
     size_t ndimR;              // Number of dimensions in the right tensor
     bool lastLeft = false;
+    bool factors_set = false;
     Tensor leftTimesRight;
     std::vector dims;
     std::vector<Tensor> init_factors_left;
     std::vector<Tensor> init_factors_right;
+    std::vector<Tensor> close_to_end_factors;

     /// Creates an initial guess by computing the SVD of each mode
     /// If the rank of the mode is smaller than the CP rank requested
@@ -422,7 +437,6 @@ namespace btas {
     void
 build(ind_t rank, ConvClass &converge_test, bool direct, ind_t max_als, bool calculate_epsilon,
                ind_t step, double &epsilon, bool SVD_initial_guess, ind_t SVD_rank, bool &fast_pI) override {
       {
-        bool factors_set = false;
         // If its the first time into build and SVD_initial_guess
         // build and optimize the initial guess based on the left
         // singular vectors of the reference tensor.
@@ -669,31 +683,33 @@ namespace btas {
                       double &epsilon, bool &fast_pI) override {
       boost::random::mt19937 generator(random_seed_accessor());
       boost::random::uniform_real_distribution<> distribution(-1.0, 1.0);
-      for (size_t i = 1; i < ndimL; ++i) {
-        auto &tensor_ref = tensor_ref_left;
-        Tensor a(Range{Range1{tensor_ref.extent(i)}, Range1{rank}});
-        for (auto iter = a.begin(); iter != a.end(); ++iter) {
-          *(iter) = distribution(generator);
+      if(!factors_set) {
+        for (size_t i = 1; i < ndimL; ++i) {
+          auto &tensor_ref = tensor_ref_left;
+          Tensor a(Range{Range1{tensor_ref.extent(i)}, Range1{rank}});
+          for (auto iter = a.begin(); iter != a.end(); ++iter) {
+            *(iter) = distribution(generator);
+          }
+          A.push_back(a);
         }
-        A.push_back(a);
-      }
-      for (size_t i = 1; i < ndimR; ++i) {
-        auto &tensor_ref = tensor_ref_right;

-        Tensor a(tensor_ref.extent(i), rank);
-        for (auto iter = a.begin(); iter != a.end(); ++iter) {
-          *(iter) = distribution(generator);
+        for (size_t i = 1; i < ndimR; ++i) {
+          auto &tensor_ref = tensor_ref_right;
+
+          Tensor a(tensor_ref.extent(i), rank);
+          for (auto iter = a.begin(); iter != a.end(); ++iter) {
+            *(iter) = distribution(generator);
+          }
+          this->A.push_back(a);
         }
-        this->A.push_back(a);
-      }
-      Tensor lambda(rank);
-      lambda.fill(0.0);
-      this->A.push_back(lambda);
-      for (size_t i = 0; i < ndim; ++i) {
-        normCol(i);
+        Tensor lambda(rank);
+        lambda.fill(0.0);
+        this->A.push_back(lambda);
+        for (size_t i = 0; i < ndim; ++i) {
+          normCol(i);
+        }
       }
-
+      factors_set = true;
       ALS(rank, converge_test, max_als, calculate_epsilon, epsilon, fast_pI);
     }
@@ -722,11 +738,20 @@ namespace btas {
       // intermediate
       bool is_converged = false;
       bool matlab = fast_pI;
-      Tensor MtKRP(A[ndim - 1].extent(0), rank);
+//      Tensor MtKRP(A[ndim - 1].extent(0), rank);
       leftTimesRight = Tensor(1);
       leftTimesRight.fill(0.0);
-      // std::cout << "count\tfit\tchange" << std::endl;
-      while (count < max_als && !is_converged) {
+
+      if(this->AtA.empty())
+        this->AtA = std::vector<Tensor>(ndim);
+      auto ptr_ata = this->AtA.begin();
+      for (size_t i = 0; i < ndim; ++i, ++ptr_ata) {
+        auto &a_mat = A[i];
+        *ptr_ata = Tensor();
+        contract(this->one, a_mat, {1, 2}, a_mat.conj(), {1, 3}, this->zero, *ptr_ata, {2, 3});
+      }
+
+      do {
         count++;
         this->num_ALS++;
         for (size_t i = 0; i < ndim; i++) {
@@ -738,13 +763,17 @@ namespace btas {
         } else {
           BTAS_EXCEPTION("Incorrectly defined symmetry");
         }
+        contract(this->one, A[i], {1, 2}, A[i].conj(), {1, 3}, this->zero, this->AtA[i], {2, 3});
       }
-      is_converged = converge_test(A);
-    }
+      is_converged = converge_test(A, this->AtA);
+      if(count == 10)
+        close_to_end_factors = this->A;
+    } while (count < max_als && !is_converged);

     // Checks loss function if required
     detail::get_fit(converge_test, epsilon, (this->num_ALS == max_als));
     epsilon = 1.0 - epsilon;
+    this->AtA.clear();

     // Checks loss function if required
     if (calculate_epsilon && epsilon == 2) {
       // TODO make this work for non-FitCheck convergence_classes
diff --git a/btas/generic/tuck_cp_als.h b/btas/generic/tuck_cp_als.h
index 469723df..ef3ed15a 100644
--- a/btas/generic/tuck_cp_als.h
+++ b/btas/generic/tuck_cp_als.h
@@ -207,7 +207,7 @@ namespace btas{
         contract(this->one,
 tucker_factors[i], {1, 2}, ai.conj(), {2, 3}, this->zero, transformed_A[i], {1, 3});
       }
       is_converged = converge_test(A, AtA);
-    }while (count < max_als && !is_converged);
+    } while (count < max_als && !is_converged);

     detail::get_fit(converge_test, epsilon, (this->num_ALS == max_als));
     epsilon = 1.0 - epsilon;
diff --git a/unittest/tensor_cp_test.cc b/unittest/tensor_cp_test.cc
index 11f9b1c2..b981aa6b 100644
--- a/unittest/tensor_cp_test.cc
+++ b/unittest/tensor_cp_test.cc
@@ -18,6 +18,9 @@ TEST_CASE("CP") {
   typedef btas::Tensor<double> tensor;

   using conv_class = btas::FitCheck<tensor>;
+  using nocheck_conv = btas::NoCheck<tensor>;
+  using appx_conv = btas::ApproxFitCheck<tensor>;
+  using diff_conv = btas::DiffFitCheck<tensor>;
   using conv_class_coupled = btas::CoupledFitCheck<tensor>;
   using btas::CP_ALS;
   using btas::CP_RALS;
@@ -77,6 +80,9 @@ TEST_CASE("CP") {
   double norm52 = sqrt(dot(D55, D55));

   conv_class conv(1e-7);
+  nocheck_conv nocheck(1e-7);
+  appx_conv appxcheck(1e-3);
+  diff_conv diffcheck(1e-3);
   // ALS tests
   {
     SECTION("ALS MODE = 3, Finite rank"){
       CP_ALS<tensor, conv_class> A1(D3);
       conv.set_norm(norm3);
       double diff = A1.compute_rank(10, conv, 1, false, 0, 100, false, false, true);
       CHECK(std::abs(diff) <= epsilon);
     }
+    SECTION("ALS MODE = 3, Finite rank, no check"){
+      CP_ALS<tensor, nocheck_conv> A1(D3);
+      A1.compute_rank(10, nocheck, 1, false, 0, 100, false, false, true);
+      auto apx = A1.reconstruct() - D3;
+      auto diff = sqrt(btas::dot(apx, apx)) / norm3;
+      CHECK(std::abs(diff) <= epsilon);
+    }
+    SECTION("ALS MODE = 3, Finite rank, approx check"){
+      CP_ALS<tensor, appx_conv> A1(D3);
+      A1.compute_rank(10, appxcheck, 1, false, 0, 100, false, false, true);
+      auto apx = A1.reconstruct() - D3;
+      auto diff = sqrt(btas::dot(apx, apx)) / norm3;
+      CHECK(std::abs(diff) <= epsilon);
+    }
+    SECTION("ALS MODE = 3, Finite rank, diff fit check"){
+      CP_ALS<tensor, diff_conv> A1(D3);
+      A1.compute_rank(10, diffcheck, 1, false, 0, 100, false, false, true);
+      auto apx = A1.reconstruct() - D3;
+      auto diff = sqrt(btas::dot(apx, apx)) / norm3;
+      CHECK(std::abs(diff) <= epsilon);
+    }
     SECTION("ALS MODE = 3, Finite error"){
       CP_ALS<tensor, conv_class> A1(D3);
       conv.set_norm(norm3);
@@ -116,6 +143,28 @@ TEST_CASE("CP") {
       double diff = A1.compute_rank(55, conv, 1, true, 55);
       CHECK(std::abs(diff) <= epsilon);
     }
+    SECTION("ALS MODE = 4, Finite rank, no check"){
+      CP_ALS<tensor, nocheck_conv> A1(D4);
+      A1.compute_rank(55, nocheck, 1, true, 55, 50);
+      auto apx = A1.reconstruct() - D4;
+      auto diff = sqrt(btas::dot(apx, apx)) / norm4;
+      CHECK(std::abs(diff) <= epsilon);
+    }
+    SECTION("ALS MODE = 4, Finite rank, approx check"){
+      CP_ALS<tensor, appx_conv> A1(D4);
+      A1.compute_rank(55, appxcheck, 1, true, 55, 50);
+      auto apx = A1.reconstruct() - D4;
+      auto diff = sqrt(btas::dot(apx, apx)) / norm4;
+      CHECK(std::abs(diff) <= epsilon);
+    }
+    SECTION("ALS MODE = 4, Finite rank, diff fit check"){
+      CP_ALS<tensor, diff_conv> A1(D4);
+      A1.compute_rank(55, diffcheck, 1, true, 55, 50);
+      auto apx = A1.reconstruct() - D4;
+      auto diff = sqrt(btas::dot(apx, apx)) / norm4;
+      CHECK(std::abs(diff) <= epsilon);
+    }
+
     SECTION("ALS MODE = 4, Finite error"){
       CP_ALS<tensor, conv_class> A1(D4);
       conv.set_norm(norm4);
diff --git a/unittest/ztensor_cp_test.cc b/unittest/ztensor_cp_test.cc
index e9f9a0fa..23794dd5 100644
--- a/unittest/ztensor_cp_test.cc
+++ b/unittest/ztensor_cp_test.cc
@@ -146,7 +146,7 @@ TEST_CASE("ZCP") {
     SECTION("RALS MODE = 4, Finite rank") {
       CP_RALS<ztensor, zconv_class> A1(Z4);
       conv.set_norm(norm4.real());
-      double diff = A1.compute_rank(67, conv, 1, true, 65);
+      double diff = A1.compute_rank(70, conv, 1, true, 65);
       CHECK(std::abs(diff) <= epsilon);
     }
     SECTION("RALS MODE = 4, Finite error"){
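Usage note: below is a minimal sketch of how the new convergence tests drop into CP_ALS, mirroring the new sections in unittest/tensor_cp_test.cc. The compute_rank argument list is copied from those tests; the helper names rel_error and cp_with_new_conv_classes are illustrative only.

```cpp
#include <cmath>

#include <btas/btas.h>

using tensor = btas::Tensor<double>;

// Relative reconstruction error, computed the same way as in the new
// test sections.
double rel_error(const tensor &approx, const tensor &ref) {
  tensor apx = approx - ref;
  return std::sqrt(btas::dot(apx, apx)) / std::sqrt(btas::dot(ref, ref));
}

double cp_with_new_conv_classes(tensor &D3) {
  // NoCheck never reports convergence, so ALS runs all 100 iterations.
  btas::NoCheck<tensor> nocheck;
  btas::CP_ALS<tensor, btas::NoCheck<tensor>> A0(D3);
  A0.compute_rank(10, nocheck, 1, false, 0, 100, false, false, true);

  // ApproxFitCheck stops once the relative change between successive CP
  // approximations stays below 1e-3; DiffFitCheck drops in the same way.
  btas::ApproxFitCheck<tensor> appx(1e-3);
  appx.verbose(true);  // prints rank, iteration, and change per sweep
  btas::CP_ALS<tensor, btas::ApproxFitCheck<tensor>> A1(D3);
  A1.compute_rank(10, appx, 1, false, 0, 100, false, false, true);

  auto approx = A1.reconstruct();
  return rel_error(approx, D3);
}
```

Unlike FitCheck, none of these tests needs set_norm or reports the fit through compute_rank's return value, which is why the new test sections measure the error from reconstruct() directly.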