Commit 20991c88 authored by Dmitry Bagaev

New test for solvers and small fixes

Simple test for int32 overflow testing in nnz count for fcbiilu2
parent e9387219
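For context, the nonzero count of an n-by-n band matrix with semiband width r is n*(2r+1) - r*(r+1), which exceeds the signed 32-bit range already for moderately large n and r; this is the kind of count the new test exercises for fcbiilu2. A minimal standalone sketch of the arithmetic, with sizes chosen purely for illustration (not taken from the commit):

#include <cstdint>
#include <iostream>

int main()
{
    // Illustrative sizes: n = 50,000,000 rows with semiband width r = 25 give
    // roughly 2.55e9 nonzeros, above INT32_MAX (~2.147e9).
    int64_t n = 50000000, r = 25;
    int64_t nnz64 = n * (2 * r + 1) - r * (r + 1); // correct 64-bit count
    int32_t nnz32 = static_cast<int32_t>(nnz64);   // truncated count, typically wraps to a negative value
    std::cout << "64-bit nnz: " << nnz64 << std::endl;
    std::cout << "32-bit nnz: " << nnz32 << std::endl;
    return 0;
}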
@@ -7,6 +7,7 @@ if(USE_SOLVER)
 add_subdirectory(solver_test000)
 #add_subdirectory(solver_test001)
 add_subdirectory(solver_test002)
+add_subdirectory(solver_test003)
 endif(USE_SOLVER)
 if(USE_MESH AND USE_MPI)
...
@@ -36,28 +36,28 @@ memory requirements.
 (*) Arguments
-Usage: ./solver_test002 method_number<0:INNER_ILU2,1:INNER_DDPQILUC,2:INNER_MPTILUC,3:INNER_MPTILU2,4:Trilinos_Aztec,5:Trilinos_Belos,6:Trilinos_ML,7:Trilinos_Ifpack,8:PETSc,9:ANI,10:FCBIILU2,11:K3BIILU2> N<for NxNxN problem> [solver_options.txt]
+Usage: ./solver_test002 <solver_type> N<for NxNxN problem> [solver_options.xml]
 * First parameter is the Solver type:
-0 - INNER_ILU2, inner Solver based on BiCGStab(L) solver with second
+inner_ilu2, inner Solver based on BiCGStab(L) solver with second
 order ILU factorization as preconditioner;
-1 - INNER_DDPQILUC, inner Solver based on BiCGStab(L) solver with second order Crout-ILU with inverse-based condition estimation and unsymmetric reordering for diagonal dominance as preconditioner;
+inner_ddpqiluc, inner Solver based on BiCGStab(L) solver with second order Crout-ILU with inverse-based condition estimation and unsymmetric reordering for diagonal dominance as preconditioner;
-2 - INNER_MPTILUC, inner Solver based on BiCGStab(L) solver with second order Crout-ILU with inverse-based condition estimation and maximum product transversal reordering as preconditioner;
+inner_mptiluc, inner Solver based on BiCGStab(L) solver with second order Crout-ILU with inverse-based condition estimation and maximum product transversal reordering as preconditioner;
-3 - INNER_MPTILU2, inner Solver based on BiCGStab(L) solver with second order ILU and maximum product transversal reordering as preconditioner;
+inner_mptilu2, inner Solver based on BiCGStab(L) solver with second order ILU and maximum product transversal reordering as preconditioner;
-4 - Trilinos_Aztec, external Solver AztecOO from Trilinos package,
+trilinos_aztec, external Solver AztecOO from Trilinos package,
 currently without preconditioner;
-5 - Trilinos_Belos, external Solver Belos from Trilinos package, currently without preconditioner;
+trilinos_belos, external Solver Belos from Trilinos package, currently without preconditioner;
-6 - Trilinos_ML, external Solver AztecOO with ML preconditioner;
+trilinos_ml, external Solver AztecOO with ML preconditioner;
-7 - Trilinos_Ifpack, external Solver AztecOO with Ifpack preconditioner;
+trilinos_ifpack, external Solver AztecOO with Ifpack preconditioner;
-8 - PETSc, external Solver PETSc;
+petsc, external Solver PETSc;
-9 - ANI, external Solver from ANI3D based on ILU2 (sequential Fortran version);
+ani, external Solver from ANI3D based on ILU2 (sequential Fortran version);
-10 - FCBIILU2, external FCBIILU2 Solver (BIILU2 parallel F2C version);
+fcbiilu2, external FCBIILU2 Solver (BIILU2 parallel F2C version);
-11 - K3BIILU2, internal K3BIILU2 Solver (BIILU2 parallel version).
+k3biilu2, internal K3BIILU2 Solver (BIILU2 parallel version).
 * Second parameter is the dimension N of the 3D Poisson problem for NxNxN
 mesh.
 * Third optional parameter is the file with solver parameters, see
-examples/MatSolve/database.txt as example.
+examples/MatSolve/database.xml as example.
 (*) Running test
@@ -66,7 +66,7 @@ For example, you can specify the 100x100x100 test case and solve it by the
 internal ILU2 based solver with the default parameters on 4 processors:
 $ cd tests/solver_test002
-$ mpirun -np 4 ./solver_test002 0 100
+$ mpirun -np 4 ./solver_test002 inner_ilu2 100
 (*) Source
...
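With this change the solver is selected by a string name rather than a numeric index, and that name can be handed directly to the INMOST Solver constructor, mirroring what the tests do below (Solver::Initialize, the string-typed Solver constructor, SetMatrix/Solve, Solver::Finalize). A minimal sketch of that pattern, assuming only the API visible in the test code:

#include <iostream>
#include "inmost.h"
using namespace INMOST;

int main(int argc, char ** argv)
{
    if( argc < 2 )
    {
        std::cout << "Usage: " << argv[0] << " <solver_type> [solver_options.xml]" << std::endl;
        return -1;
    }
    // The optional third argument of Initialize is a solver options file, e.g. solver_options.xml
    Solver::Initialize(&argc, &argv, argc > 2 ? argv[2] : NULL);
    {
        Solver s(argv[1]); // e.g. "inner_ilu2", "petsc", "fcbiilu2", "k3biilu2"
        // ... assemble a Sparse::Matrix A and Sparse::Vector b, x as in the tests, then:
        // s.SetMatrix(A);
        // s.Solve(b, x);
    }
    Solver::Finalize();
    return 0;
}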
project(solver_test003)
set(SOURCE main.cpp)
add_executable(solver_test003 ${SOURCE})
target_link_libraries(solver_test003 inmost)
if(USE_MPI)
message("linking solver_test003 with MPI")
target_link_libraries(solver_test003 ${MPI_LIBRARIES})
if(MPI_LINK_FLAGS)
set_target_properties(solver_test003 PROPERTIES LINK_FLAGS "${MPI_LINK_FLAGS}")
endif()
endif(USE_MPI)
if(USE_SOLVER)
if(USE_SOLVER_ANI)
message("linking solver_test003 with ani3d and BLAS")
target_link_libraries(solver_test003 ani3d ${BLAS_LIBRARIES})
if(BLAS_LINKER_FLAGS)
set_target_properties(solver_test003 PROPERTIES LINK_FLAGS "${BLAS_LINKER_FLAGS}")
endif()
endif()
if(USE_SOLVER_PETSC)
message("linking solver_test003 with PETSc")
target_link_libraries(solver_test003 ${PETSC_LIBRARIES})
endif()
if(USE_SOLVER_TRILINOS)
message("linking solver_test003 with Trilinos")
target_link_libraries(solver_test003 ${Trilinos_LIBRARIES} ${Trilinos_TPL_LIBRARIES})
endif()
if(USE_SOLVER_METIS)
message("linking solver_test003 with Metis")
target_link_libraries(solver_test003 ${METIS_LIBRARIES})
endif()
if(USE_SOLVER_MONDRIAAN)
message("linking solver_test003 with Mondriaan")
target_link_libraries(solver_test003 ${MONDRIAAN_LIBRARIES})
endif()
if(USE_SOLVER_SUPERLU)
message("linking solver_test003 with SuperLU")
target_link_libraries(solver_test003 ${SUPERLU_LIBRARIES})
endif()
endif()
#add_test(NAME solver_test002_serial_inner_ilu2 COMMAND $<TARGET_FILE:solver_test002> inner_ilu2 20)
#add_test(NAME solver_test002_serial_inner_ddpqiluc COMMAND $<TARGET_FILE:solver_test002> inner_ddpqiluc2 20)
if(HAVE_SOLVER_MPTILUC2)
#add_test(NAME solver_test002_serial_inner_mptiluc COMMAND $<TARGET_FILE:solver_test002> inner_mptiluc 20)
endif()
if(HAVE_SOLVER_MPTILU2)
#add_test(NAME solver_test002_serial_inner_mptilu2 COMMAND $<TARGET_FILE:solver_test002> inner_mptilu2 20)
endif()
if(SOLVER_DEFINITIONS MATCHES "^.*HAVE_SOLVER_FCBIILU2.*$")
#add_test(NAME solver_test002_serial_fcbiilu2 COMMAND $<TARGET_FILE:solver_test002> fcbiilu2 20)
endif()
if(SOLVER_DEFINITIONS MATCHES "^.*HAVE_SOLVER_K3BIILU2.*$")
#add_test(NAME solver_test002_serial_k3biilu2 COMMAND $<TARGET_FILE:solver_test002> k3biilu2 20)
endif()
if(USE_SOLVER_TRILINOS AND USE_MPI)
#add_test(NAME solver_test002_serial_trilinos_aztec COMMAND $<TARGET_FILE:solver_test002> trilinos_aztec 20)
#add_test(NAME solver_test002_serial_trilinos_belos COMMAND $<TARGET_FILE:solver_test002> trilinos_belos 20)
#add_test(NAME solver_test002_serial_trilinos_ml COMMAND $<TARGET_FILE:solver_test002> trilinos_ml 20)
#add_test(NAME solver_test002_serial_trilinos_ifpack COMMAND $<TARGET_FILE:solver_test002> trilinos_ifpack 20)
endif()
if(USE_SOLVER_PETSC)
#add_test(NAME solver_test002_serial_petsc COMMAND $<TARGET_FILE:solver_test002> petsc 20)
endif()
if(USE_SOLVER_ANI)
#add_test(NAME solver_test002_serial_ani COMMAND $<TARGET_FILE:solver_test002> ani 20)
endif()
if( USE_MPI )
if( EXISTS ${MPIEXEC} )
#add_test(NAME solver_test002_parallel_inner_ilu2 COMMAND ${MPIEXEC} -np 4 $<TARGET_FILE:solver_test002> inner_ilu2 20)
#add_test(NAME solver_test002_parallel_inner_ddpqiluc COMMAND ${MPIEXEC} -np 4 $<TARGET_FILE:solver_test002> inner_ddpqiluc2 20)
#add_test(NAME solver_test002_parallel_inner_mptiluc COMMAND ${MPIEXEC} -np 4 $<TARGET_FILE:solver_test002> inner_mptiluc 20)
#add_test(NAME solver_test002_parallel_inner_mptilu2 COMMAND ${MPIEXEC} -np 4 $<TARGET_FILE:solver_test002> inner_mptilu2 20)
if(USE_SOLVER_TRILINOS)
#add_test(NAME solver_test002_parallel_trilinos_aztec COMMAND ${MPIEXEC} -np 4 $<TARGET_FILE:solver_test002> trilinos_aztec 20)
#add_test(NAME solver_test002_parallel_trilinos_belos COMMAND ${MPIEXEC} -np 4 $<TARGET_FILE:solver_test002> trilinos_belos 20)
#add_test(NAME solver_test002_parallel_trilinos_ml COMMAND ${MPIEXEC} -np 4 $<TARGET_FILE:solver_test002> trilinos_ml 20)
#add_test(NAME solver_test002_parallel_trilinos_ifpack COMMAND ${MPIEXEC} -np 4 $<TARGET_FILE:solver_test002> trilinos_ifpack 20)
endif()
if(USE_SOLVER_PETSC)
#add_test(NAME solver_test002_parallel_petsc COMMAND ${MPIEXEC} -np 4 $<TARGET_FILE:solver_test002> petsc 20)
endif()
if(SOLVER_DEFINITIONS MATCHES "^.*HAVE_SOLVER_FCBIILU2.*$")
#add_test(NAME solver_test002_parallel_fcbiilu2 COMMAND $<TARGET_FILE:solver_test002> fcbiilu2 20)
endif()
if(SOLVER_DEFINITIONS MATCHES "^.*HAVE_SOLVER_K3BIILU2.*$")
#add_test(NAME solver_test002_parallel_k3biilu2 COMMAND $<TARGET_FILE:solver_test002> k3biilu2 20)
endif()
endif()
endif()
#include <cstdlib>
#include <cmath>
#include <string>
#include <iostream>
#include "inmost.h"
using namespace INMOST;
/***********************************************************************
(*) 5403 Band matrix solver
(*) Test solvers on band matrix.
This test is located in Tests/solver_test003
(*) Brief
Test solvers and tune their parameters on band matrices generated on the fly.
(*) Description
This test runs solvers in both serial and parallel modes with NP
processes for a model band matrix with small diagonal dominance.
For a parallel run the coefficient matrix and the vectors are generated
directly on the owning processor, so no external reordering package is
required to be installed.
The artificial right-hand side rhs=(1,1,...,1) is used.
The specific solver is selected by the user.
The user may also provide an options file to alter the default solver options.
The main purpose of this test is to assess the robustness of internal or
external solvers during development.
Another purpose is to check the behaviour of the linear solver for large
and extremely large test problems without taking the disk memory
requirements into account.
(*) Arguments
Usage: ./solver_test003 <solver_type> N<matrix dimension> r<semiband width> [solver_options.xml]
* First parameter is the Solver type:
inner_ilu2, inner Solver based on BiCGStab(L) solver with second
order ILU factorization as preconditioner;
inner_ddpqiluc, inner Solver based on BiCGStab(L) solver with second order Crout-ILU with inverse-based condition estimation and unsymmetric reordering for diagonal dominance as preconditioner;
inner_mptiluc, inner Solver based on BiCGStab(L) solver with second order Crout-ILU with inverse-based condition estimation and maximum product transversal reordering as preconditioner;
inner_mptilu2, inner Solver based on BiCGStab(L) solver with second order ILU and maximum product transversal reordering as preconditioner;
trilinos_aztec, external Solver AztecOO from Trilinos package,
currently without preconditioner;
trilinos_belos, external Solver Belos from Trilinos package, currently without preconditioner;
trilinos_ml, external Solver AztecOO with ML preconditioner;
trilinos_ifpack, external Solver AztecOO with Ifpack preconditioner;
petsc, external Solver PETSc;
ani, external Solver from ANI3D based on ILU2 (sequential Fortran version);
fcbiilu2, external FCBIILU2 Solver (BIILU2 parallel F2C version);
k3biilu2, internal K3BIILU2 Solver (BIILU2 parallel version).
* Second parameter is the dimension N of the band matrix.
* Third parameter is the semiband width r (each row has at most 2r+1 nonzero entries).
* Fourth optional parameter is the file with solver parameters, see
examples/MatSolve/database.xml as an example.
(*) Running test
You can run the test directly from the command line.
For example, you can specify a band matrix with N=100 and semiband width r=2
and solve it by the internal ILU2 based solver with the default parameters
on 4 processors:
$ cd tests/solver_test003
$ mpirun -np 4 ./solver_test003 inner_ilu2 100 2
(*) Source
* Source code is adapted from examples/MatSolve
***********************************************************************/
#if defined(USE_MPI)
#define BARRIER MPI_Barrier(MPI_COMM_WORLD);
#else
#define BARRIER
#endif
void Band(int n, int r, Sparse::Matrix & mat); // Generate band matrix
int main(int argc, char ** argv)
{
int rank,procs;
if( argc < 4 )
{
std::cout << "Usage: " << argv[0] << " S<method name> N<matrix dimension> r<semiband width> [solver_options.xml]" << std::endl;
return -1;
}
std::string type = std::string(argv[1]);
int n = atoi(argv[2]); // matrix dimension
int r = atoi(argv[3]); // semiband width
Solver::Initialize(&argc,&argv,argc > 4 ? argv[4] : NULL); // Initialize the linear solver in accordance with args
{
#if defined(USE_MPI)
MPI_Comm_rank(MPI_COMM_WORLD,&rank); // Get the rank of the current process
MPI_Comm_size(MPI_COMM_WORLD,&procs); // Get the total number of processors used
#else
rank = 0;
procs = 1;
#endif
if( rank == 0 )
std::cout << "Testing " << type << std::endl;
//std::cout << rank << "/" << procs << " " << argv[0] << std::endl;
Sparse::Matrix mat("A"); // Declare the matrix of the linear system to be solved
Sparse::Vector b("rhs"); // Declare the right-hand side vector
Sparse::Vector x("sol"); // Declare the solution vector
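// Expected nonzero count of the n-by-n band matrix with semiband width r:
// full rows carry 2r+1 entries and the first and last r rows together lose
// r*(r+1) entries, hence nz = n*(2r+1) - r*(r+1). The count is printed from a
// double-precision expression below, which keeps it meaningful when it exceeds
// the 32-bit integer range (the overflow scenario named in the commit message).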
if( !rank ) std::cout << "Sample band matrix: n= " << n << " r= " << r << " nz= " << n*(2.*r+1.)-r*(r+1.) << " p= " << procs << std::endl;
BARRIER
double tt = Timer(), t = Timer();
Band(n,r,mat);
BARRIER
if( !rank ) std::cout << "Generate matrix: " << Timer() - t << std::endl;
//mat.Save("test.mtx");
// Set the local part of the right-hand side to all ones
INMOST_DATA_ENUM_TYPE mbeg,mend,k;
mat.GetInterval(mbeg,mend);
b.SetInterval(mbeg,mend);
for( k = mbeg; k < mend; ++k ) b[k] = 1.0;
bool success = false;
int iters;
double resid, realresid = 0;
std::string reason;
{
Solver s(type); // Declare the linear solver by specified type
s.SetParameter("gmres_substeps", "3");
s.SetParameter("reorder_nonzeros", "0");
s.SetParameter("rescale_iterations", "8");
s.SetParameter("adapt_ddpq_tolerance", "0");
s.SetParameter("drop_tolerance", "0.001");
s.SetParameter("reuse_tolerance", "0.00001");
s.SetParameter("ddpq_tolerance", "0.7");
t = Timer();
s.SetMatrix(mat); // Compute the preconditioner for the original matrix
BARRIER
if( !rank ) std::cout << "preconditioner: " << Timer() - t << std::endl;
t = Timer();
success = s.Solve(b,x); // Solve the linear system with the previously computed preconditioner
BARRIER
if( !rank ) std::cout << "solver: " << Timer() - t << std::endl;
iters = s.Iterations(); // Get the number of iterations performed
resid = s.Residual(); // Get the final residual achieved
reason = s.ReturnReason();
//x.Save("output.rhs");
}
tt = Timer() - tt;
{ // Compute the true residual
double aresid = 0, bresid = 0;
Sparse::Vector test;
t = Timer();
Solver::OrderInfo info;
info.PrepareMatrix(mat,0);
info.PrepareVector(x);
info.Update(x);
mat.MatVec(1.0,x,0.0,test); // Multiply the original matrix by a vector
{
INMOST_DATA_ENUM_TYPE mbeg,mend,k;
info.GetLocalRegion(info.GetRank(),mbeg,mend);
for( k = mbeg; k < mend; ++k )
{
aresid += (test[k]-b[k])*(test[k]-b[k]);
bresid += b[k]*b[k];
}
}
double temp[2] = {aresid,bresid}, recv[2] = {aresid,bresid};
#if defined(USE_MPI)
MPI_Reduce(temp,recv,2,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
if( info.GetRank() == 0 ) std::cout << "||Ax-b|| " << sqrt(recv[0]) << " ||b|| " << sqrt(recv[1]) << " ||Ax-b||/||b|| " << sqrt(recv[0]/recv[1]) << std::endl;
#endif
realresid = sqrt(recv[0]/recv[1]);
//realresid = sqrt(realresid);
info.RestoreVector(x);
if( !rank ) std::cout << "norms: " << Timer() - t << std::endl;
}
{
if( rank == 0 )
{
std::cout << procs << " processors";
if( success )
{
std::cout << " solved in " << tt << " secs";
std::cout << " with " << iters << " iterations to " << resid << " norm";
}
else std::cout << " failed to solve";
std::cout << " matrix \"" << argv[2] << "\"";
if( argc > 3 ) std::cout << " vector \"" << argv[3] << "\"";
std::cout << " true residual ||Ax-b||/||b|| " << realresid;
std::cout << std::endl;
std::cout << "reason: " << reason << std::endl;
}
}
}
Solver::Finalize(); // Finalize solver and close MPI activity
return 0;
}
// Generate a sample band matrix with small diagonal dominance
void Band(int n, int r, Sparse::Matrix & A)
{
int myid=0, nproc=1;
#if defined(USE_MPI)
MPI_Comm_size (MPI_COMM_WORLD, &nproc);
MPI_Comm_rank (MPI_COMM_WORLD, &myid);
#endif
int ndiag = 2*r + 1; // number of nonzero diagonals in a full row of the band
if (ndiag > n) ndiag = n;
// Distribute contiguous row blocks across the processors
int block = n / nproc;
int idmin = myid * block;
int idmax = idmin + block;
if (myid == nproc-1) idmax = n;
A.SetInterval(idmin,idmax);
// Fill only the band |i-j| <= r; the diagonal exceeds the sum of the
// off-diagonal magnitudes of a full row by 1e-3, giving small diagonal dominance
for (int i=idmin; i<idmax; i++) {
for (int j=i-r; j<=i+r; j++) {
if (j < 0 || j >= n) continue;
if (i == j) {
A[i][j] = (ndiag - 1.) + 1e-3;
} else {
A[i][j] = -1.;
}
}
}
}