Fixed a bug in the calculation of the correlation length when the CTM is shrunk

yomichi committed Jan 10, 2024
1 parent c9b0de4 commit a39ed85
Showing 4 changed files with 48 additions and 29 deletions.
src/arnoldi.cpp: 10 changes (8 additions, 2 deletions)

@@ -20,6 +20,7 @@
 #include <cmath>       // for sqrt
 #include <complex>     // for complex, ope...
 #include <functional>  // for function
+#include <ostream>
 #include <vector>      // for __vector_bas...

 #include "util/abs.hpp"
@@ -32,7 +33,7 @@ namespace tenes {

 template <class ptensor>
 Arnoldi<ptensor>::Arnoldi(size_t N, size_t maxvec)
-    : N(N), maxvec(maxvec), Q(maxvec + 1), H(Shape(maxvec + 1, maxvec)) {
+    : N(N), maxvec(maxvec), Q(maxvec + 1), H(Shape(maxvec + 1, maxvec)), mpisize(-1), mpirank(-1) {
   for (size_t i = 0; i < maxvec; ++i) {
     Q[i] = ptensor(mptensor::Shape(N));
   }
@@ -41,6 +42,8 @@ Arnoldi<ptensor>::Arnoldi(size_t N, size_t maxvec)
 template <class ptensor>
 void Arnoldi<ptensor>::initialize(ptensor const& initial) {
   Q[0] = initial;
+  mpisize = Q[0].get_comm_size();
+  mpirank = Q[0].get_comm_rank();
   orthonormalize(0);
 }

@@ -53,10 +56,14 @@ double norm(ptensor const& A) {
 template <class ptensor>
 void Arnoldi<ptensor>::run(std::function<void(ptensor&, ptensor const&)> A,
                            size_t nev, int mindim, int maxiter, double rtol) {
+  if (mpisize < 0) {
+    throw std::runtime_error("Arnoldi::run: initialize() has not been called");
+  }
   if (mindim < nev) {
     mindim = nev;
   }
   this->nev = nev;
+  int iter = 0;
   for (size_t k = 1; k <= nev; ++k) {
     A(Q[k], Q[k - 1]);
     orthonormalize(k);
@@ -92,7 +99,6 @@ template <class ptensor>
 void Arnoldi<ptensor>::orthonormalize(size_t k) {
   for (size_t j = 0; j < k; ++j) {
     auto xx = tensordot(conj(Q[j]), Q[k], {0}, {0});
-    // std::cout << xx << std::endl;
     auto x = trace(xx, {}, {});
     // auto x = trace(tensordot(conj(Q[j]), Q[k], {0}, {0}), {}, {});
     H.set_value({j, k - 1}, x);
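
The change above makes Arnoldi::run() fail fast when initialize() was never called, using mpisize = -1 as a sentinel that only initialize() clears. A minimal standalone sketch of that guard pattern, with a hypothetical Solver class standing in for the real Arnoldi:

// Sketch of the sentinel-guard pattern: a communicator-dependent field
// starts at -1 and run() refuses to proceed until initialize() has set it.
// `Solver` is a hypothetical stand-in, not the real Arnoldi class.
#include <iostream>
#include <stdexcept>

class Solver {
 public:
  Solver() : mpisize(-1), mpirank(-1) {}  // sentinel: not yet initialized

  void initialize(int comm_size, int comm_rank) {
    mpisize = comm_size;  // the real code reads these from the input tensor
    mpirank = comm_rank;
  }

  void run() {
    if (mpisize < 0) {  // sentinel still in place: initialize() was skipped
      throw std::runtime_error("Solver::run: initialize() has not been called");
    }
    // ... the actual iteration would go here ...
  }

 private:
  int mpisize;
  int mpirank;
};

int main() {
  Solver s;
  try {
    s.run();  // throws, because initialize() was never called
  } catch (const std::runtime_error& e) {
    std::cerr << e.what() << '\n';
  }
  s.initialize(1, 0);
  s.run();  // fine now
}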
src/arnoldi.hpp: 3 changes (3 additions, 0 deletions)

@@ -65,6 +65,9 @@ class Arnoldi {

   std::vector<ptensor> Q;
   small_tensor<value_type> H;
+
+  int mpisize;
+  int mpirank;
 };

 }  // end of namespace tenes
src/iTPS/core/ctm.cpp: 43 changes (19 additions, 24 deletions)

@@ -240,28 +240,23 @@ void Calc_projector_left_block(const tensor &C1, const tensor &C4,
   Shape shape_col(t34, e56, t34);

   int cut = std::min(std::min(std::min(peps_parameters.CHI, e78 * t41 * t41), e12 * t12 * t12), e56 * t34 * t34);
-  /*int info ()= */
   rsvd(m_row, m_col, shape_row, shape_col, U, s, VT, cut,
        static_cast<size_t>(peps_parameters.RSVD_Oversampling_factor * cut));
   double denom = s[0];
-  std::vector<double> s_c(cut);
+  std::vector<double> s_c;
+  s_c.reserve(cut);

   for (int i = 0; i < cut; ++i) {
     if (s[i] / denom > peps_parameters.Inverse_projector_cut) {
-      s_c[i] = 1.0 / sqrt(s[i]);
+      s_c.push_back(1.0 / sqrt(s[i]));
     } else {
-      s_c[i] = 0.0;
       cut = i;
       break;
     }
   }

-  // U.multiply_vector(s, 3);
-  // VT.multiply_vector(s, 0);
-
-
   tensor U_c = mptensor::slice(U, 3, 0, cut);
   tensor VT_c = mptensor::slice(VT, 0, 0, cut);
+  s_c.resize(cut);
   U_c.multiply_vector(s_c, 3);
   VT_c.multiply_vector(s_c, 0);

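The hunk above replaces a zero-padded s_c of preallocated length with a vector holding only the inverse square roots of the singular values that pass the relative cutoff, so its length always matches the truncated dimension that U and VT are sliced to. A self-contained sketch of that pattern; truncate_inverse_sqrt is a hypothetical helper, not a function from the TeNeS sources:

// Keep 1/sqrt(s[i]) only while s[i]/s[0] stays above the cutoff; the size
// of the result is the effective truncation dimension.
#include <cassert>
#include <cmath>
#include <cstdio>
#include <vector>

std::vector<double> truncate_inverse_sqrt(const std::vector<double>& s,
                                          double cutoff) {
  assert(!s.empty());
  const double denom = s[0];  // the largest singular value sets the scale
  std::vector<double> s_c;
  s_c.reserve(s.size());
  for (double sv : s) {
    if (sv / denom > cutoff) {
      s_c.push_back(1.0 / std::sqrt(sv));
    } else {
      break;  // singular values are sorted, so nothing later survives
    }
  }
  return s_c;
}

int main() {
  std::vector<double> s = {4.0, 1.0, 1e-14, 1e-16};
  auto s_c = truncate_inverse_sqrt(s, 1e-12);
  std::printf("kept %zu of %zu values\n", s_c.size(), s.size());  // kept 2 of 4
  for (double w : s_c) std::printf("%g\n", w);  // 0.5 and 1
}
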
@@ -275,19 +270,18 @@
   tensor VT;
   std::vector<double> s;

-  /* int info = */
   svd(tensordot(LT, LB, Axes(1, 3, 5), Axes(0, 2, 4)), Axes(0, 1, 2),
       Axes(3, 4, 5), U, s, VT);
   double denom = s[0];
-  // s_c.resize(e78);
-  int cut = std::min(std::min(std::min(peps_parameters.CHI, e78 * t41 * t41), e12 * t12 * t12), e56 * t34 * t34);
-  std::vector<double> s_c(cut);
+  int cut = s.size();
+  cut = std::min(cut, peps_parameters.CHI);
+  std::vector<double> s_c;
+  s_c.reserve(cut);

   for (int i = 0; i < cut; ++i) {
     if (s[i] / denom > peps_parameters.Inverse_projector_cut) {
-      s_c[i] = 1.0 / sqrt(s[i]);
+      s_c.push_back(1.0 / sqrt(s[i]));
     } else {
-      s_c[i] = 0.0;
       cut = i;
       break;
     }
@@ -342,6 +336,8 @@ void Calc_projector_updown_blocks(
   // int t34 = Tn3.shape()[0];
   int t41 = Tn4.shape()[1];

+  bool shrink_chi = true;
+
   if (t41 != 1) {
     /*
     INFO:104 (2,3) Finish 72/104 script=[3, 0, 1, -1, 2, -1, 4, -1, -1]
@@ -414,17 +410,16 @@
   Shape shape_col(t23, e34, t23);

   int cut = std::min(peps_parameters.CHI, e34 * t23 * t23);
-  /* int info = */
   rsvd(m_row, m_col, shape_row, shape_col, U, s, VT, cut,
        static_cast<size_t>(peps_parameters.RSVD_Oversampling_factor * cut));
   double denom = s[0];

-  std::vector<double> s_c(cut);
+  std::vector<double> s_c;
+  s_c.reserve(cut);
   for (int i = 0; i < cut; ++i) {
     if (s[i] / denom > peps_parameters.Inverse_projector_cut) {
-      s_c[i] = 1.0 / sqrt(s[i]);
+      s_c.push_back(1.0 / sqrt(s[i]));
     } else {
-      s_c[i] = 0.0;
       cut = i;
       break;
     }
@@ -451,18 +446,18 @@
   tensor VT;
   std::vector<double> s;

-  /* int info = */
   svd(tensordot(R1, R2, Axes(3, 4, 5), Axes(3, 4, 5)), Axes(0, 1, 2),
       Axes(3, 4, 5), U, s, VT);
   double denom = s[0];
-  int cut = std::min(peps_parameters.CHI, e34 * t23 * t23);
-  std::vector<double> s_c(cut);
+  int cut = s.size();
+  cut = std::min(cut, peps_parameters.CHI);
+  std::vector<double> s_c;
+  s_c.reserve(cut);

   for (int i = 0; i < cut; ++i) {
     if (s[i] / denom > peps_parameters.Inverse_projector_cut) {
-      s_c[i] = 1.0 / sqrt(s[i]);
+      s_c.push_back(1.0 / sqrt(s[i]));
     } else {
-      s_c[i] = 0.0;
       cut = i;
       break;
     }
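
The likely core of the fix appears in both full-SVD branches above: the old code bounded cut only by CHI and the leg dimensions, but when the CTM has been shrunk, svd() can return fewer singular values than that bound, and the loop over s[i] would then read past the end of s. Deriving cut from s.size() first removes that out-of-bounds access. An illustrative sketch; the numbers are made up, not taken from a real run:

// Why cut must be clamped to the number of singular values actually
// returned before being capped by the requested bond dimension CHI.
#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  const int CHI = 16;             // requested CTM bond dimension
  std::vector<double> s(4, 1.0);  // the SVD returned only 4 singular values

  // old logic (sketch): cut bounded only by CHI and the tensor legs,
  // e.g. cut = std::min(CHI, e34 * t23 * t23), which may exceed s.size()

  // fixed logic: never index past the values that exist
  int cut = static_cast<int>(s.size());
  cut = std::min(cut, CHI);

  std::printf("cut = %d (s.size() = %zu, CHI = %d)\n", cut, s.size(), CHI);
}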
src/iTPS/transfer_matrix.cpp: 21 changes (18 additions, 3 deletions)

@@ -95,6 +95,9 @@ std::vector<std::complex<double>> TransferMatrix<ptensor>::eigenvalues(
     return eigvals;
   }

+  // for debug output
+  // const auto mpirank = this->Tn[0].get_comm_rank();
+
   if (N <= params.maxdim_dense_eigensolver) {
     ptensor matrix = dir == 0 ? matrix_horizontal(fixed_coord)
                               : matrix_vertical(fixed_coord);
@@ -160,20 +163,23 @@ template <class ptensor>
 ptensor TransferMatrix_ctm<ptensor>::initial_vector(int dir, int fixed_coord,
                                                     std::mt19937 &rng) const {
   const auto &lattice = this->lattice;
-  const size_t N = C1[0].shape()[0];

   ptensor initial_vec;
   if (dir == 0) {
     int site = lattice.index(0, fixed_coord);
     ptensor left_top = C1[site];
     ptensor left_bottom = C4[lattice.top(site)];
-    initial_vec = reshape(tensordot(left_top, left_bottom, {0}, {1}), {N * N});
+    const size_t CHI_top = left_top.shape()[1];
+    const size_t CHI_bottom = left_bottom.shape()[0];
+    initial_vec = reshape(tensordot(left_top, left_bottom, {0}, {1}), {CHI_top * CHI_bottom});
   } else {
     int site = lattice.index(fixed_coord, 0);
     auto left_bottom = C4[site];
     auto right_bottom = C3[lattice.left(site)];
+    const size_t CHI_left = left_bottom.shape()[1];
+    const size_t CHI_right = right_bottom.shape()[0];
     initial_vec =
-        reshape(tensordot(left_bottom, right_bottom, {0}, {1}), {N * N});
+        reshape(tensordot(left_bottom, right_bottom, {0}, {1}), {CHI_left * CHI_right});
   }
   return initial_vec;
 }
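
In initial_vector above, the old code took a single bond dimension N from C1[0] and always reshaped to length N * N. Once the CTM has been shrunk, corners can carry different bond dimensions, so the reshape target must be the product of the legs that actually survive the contraction. A dimension-counting sketch; shape_after_tensordot is a hypothetical helper for illustration only:

// tensordot(left_top, left_bottom, {0}, {1}) contracts one leg of each
// corner; the flattened vector length is the product of the two legs left.
#include <array>
#include <cstdio>

using Shape2 = std::array<std::size_t, 2>;

// Contract A's leg 0 with B's leg 1, leaving (A leg 1, B leg 0).
Shape2 shape_after_tensordot(Shape2 a, Shape2 b) { return {a[1], b[0]}; }

int main() {
  Shape2 left_top = {6, 4};     // a shrunken C1: legs of size 6 and 4
  Shape2 left_bottom = {5, 6};  // the C4 above it: legs of size 5 and 6

  Shape2 out = shape_after_tensordot(left_top, left_bottom);
  std::size_t len = out[0] * out[1];  // 4 * 5 = 20

  std::size_t N = left_top[0];  // old assumption: every leg has size N
  std::printf("correct length %zu vs old N * N = %zu\n", len, N * N);
}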
@@ -190,6 +196,10 @@ void TransferMatrix_ctm<ptensor>::matvec_horizontal(ptensor &outvec,
   const size_t CHI1 = eTb[s1].shape()[1];
   outvec = reshape(invec, {CHI0, CHI1});
   const size_t rank = eTt[0].rank();
+
+  // for debug output
+  // const auto mpirank = invec.get_comm_rank();
+
   if (rank == 3) {
     // iTPO
     for (int x = 0; x < lattice.LX; ++x) {
@@ -244,6 +254,9 @@ void TransferMatrix_ctm<ptensor>::matvec_vertical(ptensor &outvec,
   const auto x_orig = x;
   outvec = reshape(invec, {CHI0, CHI1});

+  // for debug output
+  // const auto mpirank = invec.get_comm_rank();
+
   const size_t rank = eTl[0].rank();

   if (rank == 3) {
@@ -520,6 +533,8 @@ ptensor TransferMatrix_mf<ptensor>::matrix_vertical(int x) const {
 template <class ptensor>
 size_t TransferMatrix_ctm<ptensor>::dim(int dir, int fixed_coord) const {
   const auto &lattice = this->lattice;
+  // for debug output
+  const auto mpirank = this->Tn[0].get_comm_rank();
   if(dir == 0){
     const int s0 = lattice.index(0, fixed_coord);
     const int s1 = lattice.top(s0);
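
For context on the commit title: in CTM-based codes the correlation length is conventionally read off from the ratio of the two leading transfer-matrix eigenvalues, xi = 1 / ln|lambda_0 / lambda_1|, which is why the eigensolver inputs fixed above matter. A sketch under that convention; this is not code from the TeNeS sources:

// Correlation length from the two dominant transfer-matrix eigenvalues,
// assumed sorted by magnitude, largest first.
#include <cmath>
#include <complex>
#include <cstdio>
#include <vector>

double correlation_length(const std::vector<std::complex<double>>& eigvals) {
  const double ratio = std::abs(eigvals[0]) / std::abs(eigvals[1]);
  return 1.0 / std::log(ratio);
}

int main() {
  std::vector<std::complex<double>> ev = {{1.0, 0.0}, {0.8, 0.0}};
  std::printf("xi = %g\n", correlation_length(ev));  // 1 / ln(1.25) ~ 4.48
}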
