Commit 8b7ef6c0 authored by Kirill Terekhov's avatar Kirill Terekhov

Resolve conflicts

parents 682f9db1 8fe5d3fd
......@@ -3,10 +3,8 @@ INMOST
INMOST is a software platform for development of parallel numerical models on general meshes
The presented toolkit may be useful in the creation of parallel software
MPI-platforms for developing parallel numerical models on general meshes.
Technology complex INMOST (Integrated Numerical Modelling and Object-oriented
Supercomputing Technologies) is a tool for supercomputer simulations
INMOST (Integrated Numerical Modelling and Object-oriented
Supercomputing Technologies) is a supplementary tool for supercomputer numerical mathematical models
characterized by a maximum generality of supported computational meshes,
distributed data structure flexibility and cost-effectiveness, as well as cross
platform portability.
......@@ -15,10 +13,11 @@ platform portability.
Refer to https://github.com/INMOST-DEV/INMOST/wiki/0100-Compilation
## User's guide
Refer to https://wiki.inmost.org for general guides.
Refer to http://wiki.inmost.org for general guides.
## Doxygen documentation
Doxygen-generated documentation is available online at http://doxy.inmost.org
[![Analytics](https://ga-beacon.appspot.com/UA-59408561-4/INMOST/README.md?pixel)](https://github.com/igrigorik/ga-beacon)
## CDash page
Online testing statistics are available at http://my.cdash.org/index.php?project=INMOST
1 !01 ipart (...partitioning option = -1,0,1,2)
'tst.mtx' !02 name_a
'tst.rhs' !03 name_b
1e-6 !04 eps
0999 !05 nit
1 !06 kgmr (BiCGStab: kgmr=1; GMR[kdeg]: kgmr>3*kdeg+2)
1 !07 kdeg (ignored if kgmr=1)
3 !08 kovl
3e-3 !09 tau
'res' !10 name_out
'none' !11 name_x
......@@ -3,4 +3,5 @@ Trilinos_Ifpack: trilinos_ifpack_options.xml
Trilinos_ML:
Trilinos_Aztec:
Trilinos_Belos:
FCBIILU2: ctrl_dat
K3BIILU2: ctrl_dat
#include <string>
#include <iostream>
#include <iomanip>
#include <cmath>
#include <math.h>
#include "inmost.h"
using namespace INMOST;
......@@ -12,14 +15,15 @@ using namespace INMOST;
int main(int argc, char ** argv)
{
int rank,procs;
if( argc < 3 )
if( argc < 3 || argc > 1 && ( atoi(argv[1]) < 0 || atoi(argv[1]) > 11 ) )
{
std::cout << "Usage: " << argv[0] << " method_number<0:INNER_ILU2,1:INNER_DDPQILUC,2:INNER_MPTILUC,3:INNER_MPTILU2,4:Trilinos_Aztec,5:Trilinos_Belos,6:Trilinos_ML,7:Trilinos_Ifpack,8:PETSc,9:ANI,10:FCBIILU2,11:K3BIILU2> matrix.mtx [right_hand_side.rhs] [solver_options.txt]" << std::endl;
std::cout << "Usage: " << argv[0] << " method_number<0:INNER_ILU2,1:INNER_DDPQILUC,2:INNER_MPTILUC,3:INNER_MPTILU2,4:Trilinos_Aztec,5:Trilinos_Belos,6:Trilinos_ML,7:Trilinos_Ifpack,8:PETSc,9:ANI,10:FCBIILU2,11:K3BIILU2> matrix.mtx [right_hand_side.rhs] [exact_solution] [solver_options.txt]" << std::endl;
std::cout << "Example: " << argv[0] << " 0 a.mtx b.rhs" << std::endl;
std::cout << "or just: " << argv[0] << " 11 a.mtx - 1 database.txt" << std::endl;
return -1;
}
Solver::Type type;
switch(atoi(argv[1]))
switch(atoi(argv[1])) // Actually: type = atoi(argv[1])
{
case 0: type = Solver::INNER_ILU2; break;
case 1: type = Solver::INNER_DDPQILUC; break;
......@@ -34,9 +38,9 @@ int main(int argc, char ** argv)
case 10: type = Solver::FCBIILU2; break;
case 11: type = Solver::K3BIILU2; break;
}
Solver::Initialize(&argc,&argv,argc > 4 ? argv[4] : NULL); // Initialize the linear solver in accordance with args
Solver::Initialize(&argc,&argv,argc > 5 ? argv[5] : NULL); // Initialize the linear solver in accordance with args
{
int rank, procs;
#if defined(USE_MPI)
MPI_Comm_rank(MPI_COMM_WORLD,&rank); // Get the rank of the current process
MPI_Comm_size(MPI_COMM_WORLD,&procs); // Get the total number of processors used
......@@ -54,23 +58,42 @@ int main(int argc, char ** argv)
double t = Timer(), tt = Timer(), tsol = 0;
mat.Load(std::string(argv[2])); //if interval parameters not set, matrix will be divided automatically
BARRIER
if( !rank ) std::cout << "load matrix: " << Timer() - t << std::endl;
if( !rank ) std::cout << "load matrix: " << Timer() - t << std::endl;
//mat.Save("test.mtx");
t = Timer();
if( argc > 3 )
if( argc > 4 && std::string(argv[3]) == std::string("-")
&& std::string(argv[4]) == std::string("1") )
{
//std::cout << rank << " load vector from " << std::string(argv[3]) << std::endl;
if( !rank ) std::cout << "[" << rank << "]: set RHS = A * [1]" << std::endl;
Solver::Vector ex("exact"); // Declare the temporary exact solution vector
INMOST_DATA_ENUM_TYPE mbeg,mend,k;
mat.GetInterval(mbeg,mend);
ex.SetInterval(mbeg,mend);
for( k = mbeg; k < mend; ++k ) ex[k] = 1.0;
Solver::Matrix a = mat; // Declare a temporary copy of the original matrix
Solver::OrderInfo info;
info.PrepareMatrix(a,0);
info.PrepareVector(ex);
info.Update(ex);
a.MatVec(1.0,ex,0.0,b); // Multiply the original matrix by a vector
//b.Save("b.rhs"); // Save the computed RHS if required
}
else if( argc > 3 && std::string(argv[3]) != std::string("1") )
{
if( !rank ) std::cout << "[" << rank << "]: load vector from " << std::string(argv[3]) << std::endl;
b.Load(std::string(argv[3])); // Load RHS vector
}
else // Set local RHS to 1 if it was not specified
{
if( !rank ) std::cout << "[" << rank << "]: set RHS=1" << std::endl;
INMOST_DATA_ENUM_TYPE mbeg,mend,k;
mat.GetInterval(mbeg,mend);
b.SetInterval(mbeg,mend);
for( k = mbeg; k < mend; ++k ) b[k] = 1.0;
}
BARRIER
if( !rank ) std::cout << "load vector: " << Timer() - t << std::endl;
if( !rank ) std::cout << "load vector: " << Timer() - t << std::endl;
tt = Timer();
bool success = false;
int iters;
double resid, realresid = 0;
......@@ -79,15 +102,15 @@ int main(int argc, char ** argv)
Solver s(type); // Declare the linear solver by specified type
s.SetParameterEnum("gmres_substeps",4);
s.SetParameterReal("relative_tolerance",1.0e-9);
s.SetParameterReal("relative_tolerance",1.0e-6);
s.SetParameterReal("absolute_tolerance",1.0e-16);
s.SetParameterEnum("reorder_nonzeros",0);
s.SetParameterEnum("rescale_iterations",8);
s.SetParameterEnum("adapt_ddpq_tolerance",0);
s.SetParameterReal("drop_tolerance",1.0e-3);
s.SetParameterReal("reuse_tolerance",1.0e-4);
s.SetParameterReal("drop_tolerance",3.0e-3);
s.SetParameterReal("reuse_tolerance",1.0e-5);
s.SetParameterReal("ddpq_tolerance",0.0);
s.SetParameterEnum("condition_estimation",1);
......@@ -102,12 +125,11 @@ int main(int argc, char ** argv)
t = Timer();
success = s.Solve(b,x); // Solve the linear system with the previously computted preconditioner
BARRIER
tsol += Timer()-t;
if( !rank ) std::cout << "solver: " << Timer() - t << "\t\t\t" << std::endl;
iters = s.Iterations(); // Get the number of iterations performed
resid = s.Residual(); // Get the final residual achieved
reason = s.GetReason();
//x.Save("output.rhs");
if( !rank ) std::cout << "iterations: " << Timer() - t << std::endl;
iters = s.Iterations(); // Get the number of iterations performed
resid = s.Residual(); // Get the final residual achieved
reason = s.GetReason(); // Get the convergence reason
//x.Save("output.sol"); // Save the solution if required
}
tt = Timer() - tt;
......@@ -134,7 +156,7 @@ int main(int argc, char ** argv)
double temp[2] = {aresid,bresid}, recv[2] = {aresid,bresid};
#if defined(USE_MPI)
MPI_Reduce(temp,recv,2,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
if( info.GetRank() == 0 ) std::cout << "||Ax-b|| " << sqrt(recv[0]) << " ||b|| " << sqrt(recv[1]) << " ||Ax-b||/||b|| " << sqrt(recv[0]/(recv[1]+1.0e-100)) << std::endl;
if( info.GetRank() == 0 ) std::cout << "||Ax-b||=" << sqrt(recv[0]) << " ||b||=" << sqrt(recv[1]) << " ||Ax-b||/||b||=" << sqrt(recv[0]/(recv[1]+1.0e-100)) << std::endl;
#endif
realresid = sqrt(recv[0]/(recv[1]+1.0e-100));
//realresid = sqrt(realresid);
......@@ -144,9 +166,9 @@ int main(int argc, char ** argv)
}
{
if( rank == 0 )
if( !rank )
{
std::cout << procs << " processors";
std::cout << procs << " processors for Solver::type=" << type;
if( success )
{
std::cout << " solved in " << tt << " secs (without reading " << tsol << " secs)";
......@@ -155,14 +177,67 @@ int main(int argc, char ** argv)
else std::cout << " failed to solve";
std::cout << " matrix \"" << argv[2] << "\"";
if( argc > 3 ) std::cout << " vector \"" << argv[3] << "\"";
std::cout << " true residual ||Ax-b||/||b|| " << realresid;
std::cout << " true residual ||Ax-b||/||b||=" << realresid;
std::cout << std::endl;
std::cout << "reason: " << reason << std::endl;
}
}
{ // Compare solution with exact one if available
if( argc > 4 )
{
Solver::Vector ex("exact"); // Declare the exact solution vector
Solver::Vector err("error"); // Declare the solution error vector
INMOST_DATA_ENUM_TYPE mbeg,mend,k;
mat.GetInterval(mbeg,mend);
err.SetInterval(mbeg,mend);
if( std::string(argv[4]) == std::string("1"))
{
ex.SetInterval(mbeg,mend);
for( k = mbeg; k < mend; ++k ) ex[k] = 1.0;
}
else
{
//std::cout << "[" << rank << "]: load exact solution vector from " << std::string(argv[4]) << std::endl;
ex.Load(std::string(argv[4])); // Load exact solution vector
}
BARRIER
double dif1 = 0, dif2 = 0, dif8 = 0, norm = 0;
//std::cout << "[" << rank << "]: mbeg=" << mbeg << " mend=" << mend << std::endl;
for( k = mbeg; k < mend; ++k )
{
double dloc = err[k] = fabs(x[k] - ex[k]);
dif1 += dloc;
dif2 += dloc*dloc;
dif8 = (dif8 > dloc) ? dif8 : dloc;
norm += fabs(ex[k]);
}
#if defined(USE_MPI)
if( procs > 1 )
{
double temp = dif1;
MPI_Reduce(&temp,&dif1,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
temp = dif2;
MPI_Reduce(&temp,&dif2,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
temp = dif8;
MPI_Reduce(&temp,&dif8,1,MPI_DOUBLE,MPI_MAX,0,MPI_COMM_WORLD);
temp = norm;
MPI_Reduce(&temp,&norm,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
}
#endif
dif2 = sqrt(dif2);
norm += 1.0e-100;
if( !rank )
{
std::cout << "Difference with exact solution \"" << std::string(argv[4]) << "\": " << std::scientific << std::setprecision(6) << std::endl;
std::cout << "dif1 = " << dif1 << " dif2 = " << dif2 << " dif8 = " << dif8 << " ||ex||_1 = " << norm << std::endl;
std::cout << "rel1 = " << dif1/norm << " rel2 = " << dif2/norm << " rel8 = " << dif8/norm << std::endl;
}
//err.Save("error.sol");
}
}
}
Solver::Finalize(); // Finalize solver and close MPI activity
return 0;
}
#-info
#-mat_view
#-ksp_view
-mat_no_inode
-ksp_monitor
-ksp_monitor_true_residual
-ksp_view
-ksp_atol 1e-13
-ksp_rtol 1e-6
-ksp_divtol 1e+200
#-mat_no_inode
-ksp_monitor
#-ksp_monitor_true_residual
-ksp_view
-ksp_atol 1e-13
-ksp_rtol 1e-6
-ksp_divtol 1e+200
-ksp_max_it 1000
-ksp_type bcgs
#-ksp_type dgmres
#-pc_type asm
-ksp_pc_side right
#-pc_type ilu
#-pc_type hypre
#-pc_hypre_type boomeramg
#-pc_asm_overlap 2
#-pc_asm_pc_type hypre
#-pc_asm_pc_hypre_type boomeramg
#-pc_factor_levels 3
#-pc_factor_fill 2
#-pc_factor_levels 1
#-pc_factor_fill 9
-pc_type asm
-pc_asm_overlap 3
-sub_pc_view
-sub_pc_type ilu
-sub_pc_factor_levels 1
-sub_pc_factor_diagonal_fill
#!/bin/bash
#PBS -N MatSol
#PBS -q x12core
#PBS -l nodes=1:ppn=12
#PBS -l walltime=0:30:00
# PBS job script: run the MatSolve benchmark on one matrix/RHS set for
# several solver backends and several MPI process counts, appending all
# output to the file "res" in the submission directory.
cd $PBS_O_WORKDIR
dir=`basename $PBS_O_WORKDIR`
# Remove results of any previous run; every invocation below appends to "res".
rm -f res
# MatSolve argument sets: matrix, RHS ("-" = generate RHS as A*[1]),
# exact solution flag, solver options file. Uncomment one line to select.
#mat="z32k.mtx z32k.rhs - database.txt"
#mat="z32k.mtx - 1 database.txt"
mat="n142-2.mtx n142-2.rhs - database.txt"
# p = total number of MPI processes; s = solver type index passed to MatSolve.
for p in 4 ; do ### 1 2 3 4 5 6 7 8 9 10 11 12 12 24 36 72 144
#for s in 8 0 ; do ### 8-PE 10-FC 11-K3 0-II
for s in 8 10 11 0 ; do ### 8-PE 10-FC 11-K3 0-II
# A node has ppn=12 cores, so cap the number of processes placed per node.
if [ $p -le 12 ] ; then npernode=$p ; else npernode=12 ; fi
# Write a header line identifying this run, then append the solver output.
echo ::: dir=$dir p=$p npernode=$npernode s=$s mat=$mat ::: >> res
mpiexec -np $p -npernode $npernode ./MatSolve $s $mat >> res
done; done
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment