Usage examples: serial solvers
These examples show how to compute a few of the smallest eigenvalues of a real symmetric matrix, together with the corresponding eigenvectors. The matrix is banded with bandwidth 5. Note that it is never stored in memory; it is defined only by the rule of how it acts on a vector.
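Concretely, all of the listings below diagonalize the same matrix of size N = 10000, whose action on a vector x is

(A x)_i = \sum_{j:\, |i-j| \le b} \frac{|i-j|}{1+i+j}\, x_j, \qquad b = \text{bandwidth} = 5,

which is exactly the rule implemented by the matrix_op callable in each example.

For orientation only, here is a hypothetical cross-check that is not part of the shipped examples: for a much smaller problem one can afford to store A densely and inspect its low-lying spectrum with a dense symmetric eigensolver. This is a minimal sketch, assuming Eigen 3 is available; the size n = 100 and the program itself are illustrative choices.

#include <algorithm>
#include <cmath>
#include <cstdlib>
#include <iostream>

#include <Eigen/Dense>

int main() {
  const int n = 100;       // small enough to store the matrix densely
  const int bandwidth = 5; // same bandwidth as in the examples below

  // Build A_{ij} = |i-j| / (1 + i + j) inside the band, zero outside of it
  Eigen::MatrixXd A = Eigen::MatrixXd::Zero(n, n);
  for(int i = 0; i < n; ++i) {
    int j_min = std::max(0, i - bandwidth);
    int j_max = std::min(n - 1, i + bandwidth);
    for(int j = j_min; j <= j_max; ++j)
      A(i, j) = double(std::abs(i - j)) / (1 + i + j);
  }

  // Dense diagonalization; eigenvalues come out sorted in increasing order
  Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es(A);
  std::cout << "10 smallest eigenvalues:\n"
            << es.eigenvalues().head(10).transpose() << std::endl;

  return 0;
}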
Eigen 3
#include <cmath>
#include <iostream>

// This example shows how to use ezARPACK and the Eigen3 storage backend
// to partially diagonalize a large sparse symmetric matrix
// and find a number of its low-lying eigenvalues.

#include <ezarpack/arpack_solver.hpp>
#include <ezarpack/storages/eigen.hpp>
#include <ezarpack/version.hpp>

using namespace ezarpack;
using namespace Eigen;

// Size of the matrix
const int N = 10000;

// We are going to use a band matrix with this bandwidth
const int bandwidth = 5;

// The number of low-lying eigenvalues we want to compute
const int N_ev = 10;

int main() {

  // Print ezARPACK version
  std::cout << "Using ezARPACK version " << EZARPACK_VERSION << std::endl;

  // Construct a solver object for the symmetric case.
  // For the Eigen3 storage backend, other options would be
  // * `arpack_solver<ezarpack::Asymmetric, eigen_storage>' for general
  //   real matrices;
  // * `arpack_solver<ezarpack::Complex, eigen_storage>' for general
  //   complex matrices.
  using solver_t = arpack_solver<ezarpack::Symmetric, eigen_storage>;
  solver_t solver(N);

  // Specify parameters for the solver
  using params_t = solver_t::params_t;
  params_t params(N_ev,               // Number of low-lying eigenvalues
                  params_t::Smallest, // We want the smallest eigenvalues
                  true);              // Yes, we want the eigenvectors
                                      // (Ritz vectors) as well

  // Linear operator representing multiplication of a given vector by our matrix
  // The operator must act on the 'in' vector and store results in 'out'.
  auto matrix_op = [](solver_t::vector_const_view_t in,
                      solver_t::vector_view_t out) {
    out.fill(0); // Clear result

    // out_i = \sum_j A_{ij} in_j
    // A_{ij} = |i-j| / (1 + i + j), if |i-j| <= bandwidth, zero otherwise
    for(int i = 0; i < N; ++i) {
      int j_min = std::max(0, i - bandwidth);
      int j_max = std::min(N - 1, i + bandwidth);
      for(int j = j_min; j <= j_max; ++j) {
        out(i) += double(std::abs(i - j)) / (1 + i + j) * in(j);
      }
    }
  };

  // Run diagonalization!
  solver(matrix_op, params);

  // Number of converged eigenvalues
  std::cout << solver.nconv() << " out of " << params.n_eigenvalues
            << " eigenvalues have converged" << std::endl;

  // Print found eigenvalues
  std::cout << "Eigenvalues (Ritz values):" << std::endl;
  std::cout << solver.eigenvalues().transpose() << std::endl;

  // Check A*v = \lambda*v
  auto const& lambda = solver.eigenvalues();
  auto const& v = solver.eigenvectors();
  VectorXd lhs(N), rhs(N);

  for(int i = 0; i < N_ev; ++i) {             // For each eigenpair ...
    const VectorXd eigenvec = v.col(i);
    matrix_op(eigenvec.head(N), lhs.head(N)); // calculate A*v
    rhs = lambda(i) * eigenvec;               // and \lambda*v

    std::cout << i << ": deviation = " << (rhs - lhs).norm() / N << std::endl;
  }

  // Print some computation statistics
  auto stats = solver.stats();

  std::cout << "Number of Lanczos update iterations: " << stats.n_iter
            << std::endl;
  std::cout << "Total number of OP*x operations: " << stats.n_op_x_operations
            << std::endl;
  std::cout << "Total number of steps of re-orthogonalization: "
            << stats.n_reorth_steps << std::endl;

  return 0;
}
Blaze
#include <cmath>
#include <iostream>

// This example shows how to use ezARPACK and the Blaze storage backend
// to partially diagonalize a large sparse symmetric matrix
// and find a number of its low-lying eigenvalues.

#include <ezarpack/arpack_solver.hpp>
#include <ezarpack/storages/blaze.hpp>
#include <ezarpack/version.hpp>

using namespace ezarpack;
using namespace blaze;

// Size of the matrix
const int N = 10000;

// We are going to use a band matrix with this bandwidth
const int bandwidth = 5;

// The number of low-lying eigenvalues we want to compute
const int N_ev = 10;

int main() {

  // Print ezARPACK version
  std::cout << "Using ezARPACK version " << EZARPACK_VERSION << std::endl;

  // Construct a solver object for the symmetric case.
  // For the Blaze storage backend, other options would be
  // * `arpack_solver<ezarpack::Asymmetric, blaze_storage>' for general
  //   real matrices;
  // * `arpack_solver<ezarpack::Complex, blaze_storage>' for general
  //   complex matrices.
  using solver_t = arpack_solver<ezarpack::Symmetric, blaze_storage>;
  solver_t solver(N);

  // Specify parameters for the solver
  using params_t = solver_t::params_t;
  params_t params(N_ev,               // Number of low-lying eigenvalues
                  params_t::Smallest, // We want the smallest eigenvalues
                  true);              // Yes, we want the eigenvectors
                                      // (Ritz vectors) as well

  // Linear operator representing multiplication of a given vector by our matrix
  // The operator must act on the 'in' vector and store results in 'out'.
  auto matrix_op = [](solver_t::vector_const_view_t in,
                      solver_t::vector_view_t out) {
    out.reset(); // Clear result

    // out_i = \sum_j A_{ij} in_j
    // A_{ij} = |i-j| / (1 + i + j), if |i-j| <= bandwidth, zero otherwise
    for(int i = 0; i < N; ++i) {
      int j_min = std::max(0, i - bandwidth);
      int j_max = std::min(N - 1, i + bandwidth);
      for(int j = j_min; j <= j_max; ++j) {
        out[i] += double(std::abs(i - j)) / (1 + i + j) * in[j];
      }
    }
  };

  // Run diagonalization!
  solver(matrix_op, params);

  // Number of converged eigenvalues
  std::cout << solver.nconv() << " out of " << params.n_eigenvalues
            << " eigenvalues have converged" << std::endl;

  // Print found eigenvalues
  std::cout << "Eigenvalues (Ritz values):" << std::endl;
  std::cout << trans(solver.eigenvalues()) << std::endl;

  // Check A*v = \lambda*v
  auto const& lambda = solver.eigenvalues();
  auto const& v = solver.eigenvectors();
  DynamicVector<double> lhs(N), rhs(N);

  for(int i = 0; i < N_ev; ++i) { // For each eigenpair ...
    const DynamicVector<double> eigenvec = column(v, i);
    matrix_op(subvector(eigenvec, 0, N), subvector(lhs, 0, N)); // calculate A*v
    rhs = lambda[i] * eigenvec;                                 // and \lambda*v

    std::cout << i << ": deviation = " << norm(rhs - lhs) / N << std::endl;
  }

  // Print some computation statistics
  auto stats = solver.stats();

  std::cout << "Number of Lanczos update iterations: " << stats.n_iter
            << std::endl;
  std::cout << "Total number of OP*x operations: " << stats.n_op_x_operations
            << std::endl;
  std::cout << "Total number of steps of re-orthogonalization: "
            << stats.n_reorth_steps << std::endl;

  return 0;
}
Armadillo
#include <cmath>
#include <iostream>

// This example shows how to use ezARPACK and the Armadillo storage backend
// to partially diagonalize a large sparse symmetric matrix
// and find a number of its low-lying eigenvalues.

#include <ezarpack/arpack_solver.hpp>
#include <ezarpack/storages/armadillo.hpp>
#include <ezarpack/version.hpp>

using namespace ezarpack;
using namespace arma;

// Size of the matrix
const int N = 10000;

// We are going to use a band matrix with this bandwidth
const int bandwidth = 5;

// The number of low-lying eigenvalues we want to compute
const int N_ev = 10;

int main() {

  // Print ezARPACK version
  std::cout << "Using ezARPACK version " << EZARPACK_VERSION << std::endl;

  // Construct a solver object for the symmetric case.
  // For the Armadillo storage backend, other options would be
  // * `arpack_solver<Asymmetric, armadillo_storage>' for general real matrices;
  // * `arpack_solver<Complex, armadillo_storage>' for general complex matrices.
  using solver_t = arpack_solver<Symmetric, armadillo_storage>;
  solver_t solver(N);

  // Specify parameters for the solver
  using params_t = solver_t::params_t;
  params_t params(N_ev,               // Number of low-lying eigenvalues
                  params_t::Smallest, // We want the smallest eigenvalues
                  true);              // Yes, we want the eigenvectors
                                      // (Ritz vectors) as well

  // Linear operator representing multiplication of a given vector by our matrix
  // The operator must act on the 'in' vector and store results in 'out'.
  auto matrix_op = [](solver_t::vector_const_view_t in,
                      solver_t::vector_view_t out) {
    out.zeros(); // Clear result

    // out_i = \sum_j A_{ij} in_j
    // A_{ij} = |i-j| / (1 + i + j), if |i-j| <= bandwidth, zero otherwise
    for(int i = 0; i < N; ++i) {
      int j_min = std::max(0, i - bandwidth);
      int j_max = std::min(N - 1, i + bandwidth);
      for(int j = j_min; j <= j_max; ++j) {
        out[i] += double(std::abs(i - j)) / (1 + i + j) * in[j];
      }
    }
  };

  // Run diagonalization!
  solver(matrix_op, params);

  // Number of converged eigenvalues
  std::cout << solver.nconv() << " out of " << params.n_eigenvalues
            << " eigenvalues have converged" << std::endl;

  // Print found eigenvalues
  std::cout << "Eigenvalues (Ritz values):" << std::endl;
  std::cout << solver.eigenvalues().t() << std::endl;

  // Check A*v = \lambda*v
  auto const& lambda = solver.eigenvalues();
  auto const& v = solver.eigenvectors();
  vec lhs(N), rhs(N);

  for(int i = 0; i < N_ev; ++i) {        // For each eigenpair ...
    auto const eigenvec = v.col(i);
    matrix_op(eigenvec, lhs(span::all)); // calculate A*v
    rhs = lambda[i] * eigenvec;          // and \lambda*v

    std::cout << i << ": deviation = " << norm(rhs - lhs) / N << std::endl;
  }

  // Print some computation statistics
  auto stats = solver.stats();

  std::cout << "Number of Lanczos update iterations: " << stats.n_iter
            << std::endl;
  std::cout << "Total number of OP*x operations: " << stats.n_op_x_operations
            << std::endl;
  std::cout << "Total number of steps of re-orthogonalization: "
            << stats.n_reorth_steps << std::endl;

  return 0;
}
Boost uBLAS
#include <algorithm>
#include <cmath>
#include <iostream>

// This example shows how to use ezARPACK and the uBLAS storage backend
// to partially diagonalize a large sparse symmetric matrix
// and find a number of its low-lying eigenvalues.

#include <ezarpack/arpack_solver.hpp>
#include <ezarpack/storages/ublas.hpp>
#include <ezarpack/version.hpp>

#include <boost/numeric/ublas/io.hpp>

using namespace ezarpack;
using namespace boost::numeric::ublas;

// Size of the matrix
const int N = 10000;

// We are going to use a band matrix with this bandwidth
const int bandwidth = 5;

// The number of low-lying eigenvalues we want to compute
const int N_ev = 10;

int main() {

  // Print ezARPACK version
  std::cout << "Using ezARPACK version " << EZARPACK_VERSION << std::endl;

  // Construct a solver object for the symmetric case.
  // For the uBLAS storage backend, other options would be
  // * `arpack_solver<Asymmetric, ublas_storage>' for general real matrices;
  // * `arpack_solver<Complex, ublas_storage>' for general complex matrices.
  using solver_t = arpack_solver<Symmetric, ublas_storage>;
  solver_t solver(N);

  // Specify parameters for the solver
  using params_t = solver_t::params_t;
  params_t params(N_ev,               // Number of low-lying eigenvalues
                  params_t::Smallest, // We want the smallest eigenvalues
                  true);              // Yes, we want the eigenvectors
                                      // (Ritz vectors) as well

  // Linear operator representing multiplication of a given vector by our matrix
  // The operator must act on the 'in' vector and store results in 'out'.
  auto matrix_op = [](solver_t::vector_const_view_t in,
                      solver_t::vector_view_t out) {
    std::fill(out.begin(), out.end(), .0); // Clear result

    // out_i = \sum_j A_{ij} in_j
    // A_{ij} = |i-j| / (1 + i + j), if |i-j| <= bandwidth, zero otherwise
    for(int i = 0; i < N; ++i) {
      int j_min = std::max(0, i - bandwidth);
      int j_max = std::min(N - 1, i + bandwidth);
      for(int j = j_min; j <= j_max; ++j) {
        out(i) += double(std::abs(i - j)) / (1 + i + j) * in(j);
      }
    }
  };

  // Run diagonalization!
  solver(matrix_op, params);

  // Number of converged eigenvalues
  std::cout << solver.nconv() << " out of " << params.n_eigenvalues
            << " eigenvalues have converged" << std::endl;

  // Print found eigenvalues
  std::cout << "Eigenvalues (Ritz values):" << std::endl;
  std::cout << solver.eigenvalues() << std::endl;

  // Check A*v = \lambda*v
  auto const& lambda = solver.eigenvalues();
  auto const& v = solver.eigenvectors();
  vector<double> lhs(N), rhs(N);

  for(int i = 0; i < N_ev; ++i) { // For each eigenpair ...
    const vector<double> eigenvec = column(v, i);
    matrix_op(subrange(eigenvec, 0, N), subrange(lhs, 0, N)); // calculate A*v
    rhs = lambda(i) * eigenvec;                               // and \lambda*v

    std::cout << i << ": deviation = " << norm_2(rhs - lhs) / N << std::endl;
  }

  // Print some computation statistics
  auto stats = solver.stats();

  std::cout << "Number of Lanczos update iterations: " << stats.n_iter
            << std::endl;
  std::cout << "Total number of OP*x operations: " << stats.n_op_x_operations
            << std::endl;
  std::cout << "Total number of steps of re-orthogonalization: "
            << stats.n_reorth_steps << std::endl;

  return 0;
}
TRIQS arrays
#include <cmath>
#include <iostream>

// This example shows how to use ezARPACK and the TRIQS storage backend
// to partially diagonalize a large sparse symmetric matrix
// and find a number of its low-lying eigenvalues.

#include <ezarpack/arpack_solver.hpp>
#include <ezarpack/storages/triqs.hpp>
#include <ezarpack/version.hpp>

using namespace ezarpack;
using namespace triqs::arrays;

// Size of the matrix
const int N = 10000;

// We are going to use a band matrix with this bandwidth
const int bandwidth = 5;

// The number of low-lying eigenvalues we want to compute
const int N_ev = 10;

int main() {

  // Print ezARPACK version
  std::cout << "Using ezARPACK version " << EZARPACK_VERSION << std::endl;

  // Construct a solver object for the symmetric case.
  // For the TRIQS storage backend, other options would be
  // * `arpack_solver<Asymmetric, triqs_storage>' for general real matrices;
  // * `arpack_solver<Complex, triqs_storage>' for general complex matrices.
  using solver_t = arpack_solver<Symmetric, triqs_storage>;
  solver_t solver(N);

  // Specify parameters for the solver
  using params_t = solver_t::params_t;
  params_t params(N_ev,               // Number of low-lying eigenvalues
                  params_t::Smallest, // We want the smallest eigenvalues
                  true);              // Yes, we want the eigenvectors
                                      // (Ritz vectors) as well

  // Linear operator representing multiplication of a given vector by our matrix
  // The operator must act on the 'in' vector and store results in 'out'.
  auto matrix_op = [](auto in, auto out) {
    out() = 0; // Clear result

    // out_i = \sum_j A_{ij} in_j
    // A_{ij} = |i-j| / (1 + i + j), if |i-j| <= bandwidth, zero otherwise
    for(int i = 0; i < N; ++i) {
      int j_min = std::max(0, i - bandwidth);
      int j_max = std::min(N - 1, i + bandwidth);
      for(int j = j_min; j <= j_max; ++j) {
        out(i) += double(std::abs(i - j)) / (1 + i + j) * in(j);
      }
    }
  };

  // Run diagonalization!
  solver(matrix_op, params);

  // Number of converged eigenvalues
  std::cout << solver.nconv() << " out of " << params.n_eigenvalues
            << " eigenvalues have converged" << std::endl;

  // Print found eigenvalues
  std::cout << "Eigenvalues (Ritz values):" << std::endl;
  std::cout << solver.eigenvalues() << std::endl;

  // Check A*v = \lambda*v
  auto const& lambda = solver.eigenvalues();
  auto const& v = solver.eigenvectors();
  vector<double> lhs(N), rhs(N);

  for(int i = 0; i < N_ev; ++i) { // For each eigenpair ...
    auto const eigenvec = v(range(), i);
    matrix_op(eigenvec, lhs());   // calculate A*v
    rhs = lambda(i) * eigenvec;   // and \lambda*v

    std::cout << i << ": deviation = " << norm2(rhs - lhs) / N << std::endl;
  }

  // Print some computation statistics
  auto stats = solver.stats();

  std::cout << "Number of Lanczos update iterations: " << stats.n_iter
            << std::endl;
  std::cout << "Total number of OP*x operations: " << stats.n_op_x_operations
            << std::endl;
  std::cout << "Total number of steps of re-orthogonalization: "
            << stats.n_reorth_steps << std::endl;

  return 0;
}
TRIQS/nda
#include <cmath>
#include <iostream>

// This example shows how to use ezARPACK and the TRIQS/nda storage backend
// to partially diagonalize a large sparse symmetric matrix
// and find a number of its low-lying eigenvalues.

#include <ezarpack/arpack_solver.hpp>
#include <ezarpack/storages/nda.hpp>
#include <ezarpack/version.hpp>

using namespace ezarpack;
using namespace nda;

// Size of the matrix
const int N = 10000;

// We are going to use a band matrix with this bandwidth
const int bandwidth = 5;

// The number of low-lying eigenvalues we want to compute
const int N_ev = 10;

int main() {

  // Print ezARPACK version
  std::cout << "Using ezARPACK version " << EZARPACK_VERSION << std::endl;

  // Construct a solver object for the symmetric case.
  // For the nda storage backend, other options would be
  // * `arpack_solver<Asymmetric, nda_storage>' for general real matrices;
  // * `arpack_solver<Complex, nda_storage>' for general complex matrices.
  using solver_t = arpack_solver<Symmetric, nda_storage>;
  solver_t solver(N);

  // Specify parameters for the solver
  using params_t = solver_t::params_t;
  params_t params(N_ev,               // Number of low-lying eigenvalues
                  params_t::Smallest, // We want the smallest eigenvalues
                  true);              // Yes, we want the eigenvectors
                                      // (Ritz vectors) as well

  // Linear operator representing multiplication of a given vector by our matrix
  // The operator must act on the 'in' vector and store results in 'out'.
  auto matrix_op = [](auto in, auto out) {
    out() = 0; // Clear result

    // out_i = \sum_j A_{ij} in_j
    // A_{ij} = |i-j| / (1 + i + j), if |i-j| <= bandwidth, zero otherwise
    for(int i = 0; i < N; ++i) {
      int j_min = std::max(0, i - bandwidth);
      int j_max = std::min(N - 1, i + bandwidth);
      for(int j = j_min; j <= j_max; ++j) {
        out(i) += double(std::abs(i - j)) / (1 + i + j) * in(j);
      }
    }
  };

  // Run diagonalization!
  solver(matrix_op, params);

  // Number of converged eigenvalues
  std::cout << solver.nconv() << " out of " << params.n_eigenvalues
            << " eigenvalues have converged" << std::endl;

  // Print found eigenvalues
  std::cout << "Eigenvalues (Ritz values):" << std::endl;
  std::cout << solver.eigenvalues() << std::endl;

  // Check A*v = \lambda*v
  auto const& lambda = solver.eigenvalues();
  auto const& v = solver.eigenvectors();
  vector<double> lhs(N), rhs(N);

  for(int i = 0; i < N_ev; ++i) { // For each eigenpair ...
    auto const eigenvec = v(range::all, i);
    matrix_op(eigenvec, lhs());   // calculate A*v
    rhs = lambda(i) * eigenvec;   // and \lambda*v

    std::cout << i << ": deviation = " << std::sqrt(sum(abs2(rhs - lhs))) / N
              << std::endl;
  }

  // Print some computation statistics
  auto stats = solver.stats();

  std::cout << "Number of Lanczos update iterations: " << stats.n_iter
            << std::endl;
  std::cout << "Total number of OP*x operations: " << stats.n_op_x_operations
            << std::endl;
  std::cout << "Total number of steps of re-orthogonalization: "
            << stats.n_reorth_steps << std::endl;

  return 0;
}
xtensor
#include <cmath>
#include <iostream>

// This example shows how to use ezARPACK and the xtensor storage backend
// to partially diagonalize a large sparse symmetric matrix
// and find a number of its low-lying eigenvalues.

#include <ezarpack/arpack_solver.hpp>
#include <ezarpack/storages/xtensor.hpp>
#include <ezarpack/version.hpp>

#include <xtensor/io/xio.hpp>
#include <xtensor/reducers/xnorm.hpp>

using namespace ezarpack;
using namespace xt;

// Size of the matrix
const int N = 10000;

// We are going to use a band matrix with this bandwidth
const int bandwidth = 5;

// The number of low-lying eigenvalues we want to compute
const int N_ev = 10;

int main() {

  // Print ezARPACK version
  std::cout << "Using ezARPACK version " << EZARPACK_VERSION << std::endl;

  // Construct a solver object for the symmetric case.
  // For the xtensor storage backend, other options would be
  // * `arpack_solver<ezarpack::Asymmetric, xtensor_storage>' for general
  //   real matrices;
  // * `arpack_solver<ezarpack::Complex, xtensor_storage>' for general
  //   complex matrices.
  using solver_t = arpack_solver<ezarpack::Symmetric, xtensor_storage>;
  solver_t solver(N);

  // Specify parameters for the solver
  using params_t = solver_t::params_t;
  params_t params(N_ev,               // Number of low-lying eigenvalues
                  params_t::Smallest, // We want the smallest eigenvalues
                  true);              // Yes, we want the eigenvectors
                                      // (Ritz vectors) as well

  // Linear operator representing multiplication of a given vector by our matrix
  // The operator must act on the 'in' vector and store results in 'out'.
  auto matrix_op = [](auto in, auto out) {
    out.fill(.0); // Clear result

    // out_i = \sum_j A_{ij} in_j
    // A_{ij} = |i-j| / (1 + i + j), if |i-j| <= bandwidth, zero otherwise
    for(int i = 0; i < N; ++i) {
      int j_min = std::max(0, i - bandwidth);
      int j_max = std::min(N - 1, i + bandwidth);
      for(int j = j_min; j <= j_max; ++j) {
        out(i) += double(std::abs(i - j)) / (1 + i + j) * in(j);
      }
    }
  };

  // Run diagonalization!
  solver(matrix_op, params);

  // Number of converged eigenvalues
  std::cout << solver.nconv() << " out of " << params.n_eigenvalues
            << " eigenvalues have converged" << std::endl;

  // Print found eigenvalues
  std::cout << "Eigenvalues (Ritz values):" << std::endl;
  std::cout << solver.eigenvalues() << std::endl;

  // Check A*v = \lambda*v
  auto const& lambda = solver.eigenvalues();
  auto const& v = solver.eigenvectors();

  auto lhs = xt::xtensor<double, 1>::from_shape({N});
  auto rhs = xt::xtensor<double, 1>::from_shape({N});

  for(int i = 0; i < N_ev; ++i) { // For each eigenpair ...
    auto const eigenvec = xt::view(v, xt::all(), i);
    matrix_op(eigenvec, xt::view(lhs, xt::all())); // calculate A*v
    rhs = lambda(i) * eigenvec;                    // and \lambda*v

    std::cout << i << ": deviation = " << xt::norm_l2(rhs - lhs) / N
              << std::endl;
  }

  // Print some computation statistics
  auto stats = solver.stats();

  std::cout << "Number of Lanczos update iterations: " << stats.n_iter
            << std::endl;
  std::cout << "Total number of OP*x operations: " << stats.n_op_x_operations
            << std::endl;
  std::cout << "Total number of steps of re-orthogonalization: "
            << stats.n_reorth_steps << std::endl;

  return 0;
}