Commit 17b81e8a authored by Kirill Terekhov

Fixes

Activated checking for cell duplication by default to cure a problem when
loading distributed meshes with ghost cells on the same processor.
Fixed a problem where the vtk reader could delete nodes that existed
previously.
Removed the possibility to pass parameters directly to internal solvers
through Solver::SetParameter*, since they are overwritten later during setup.
Added solver_test001, but did not activate it yet.
parent 12311436
......@@ -111,7 +111,7 @@ namespace INMOST
static const TopologyCheck NEED_TEST_CLOSURE = 0x40000000; //done//silent, test's for closure in ComputeGeometricType, needed to detect MultiLine and MultiPolygon
static const TopologyCheck DISABLE_2D = 0x80000000; //done//don't allow 2d grids, where edges appear to be vertexes, faces are edges and cells are faces
static const TopologyCheck GRID_CONFORMITY = NEED_TEST_CLOSURE | PROHIBIT_MULTILINE | PROHIBIT_MULTIPOLYGON | INTERLEAVED_FACES | TRIPLE_SHARED_FACE;
static const TopologyCheck DEFAULT_CHECK = THROW_EXCEPTION | DUPLICATE_EDGE | DUPLICATE_FACE | PRINT_NOTIFY;
static const TopologyCheck DEFAULT_CHECK = THROW_EXCEPTION | DUPLICATE_EDGE | DUPLICATE_FACE | DUPLICATE_CELL | PRINT_NOTIFY;
const char * TopologyCheckNotifyString(TopologyCheck c); //mesh.cpp
const char * DataTypeName (DataType t); //tag.cpp
__INLINE bool OneType (ElementType t) {return t > 0 && (t & (t-1)) == 0;}
......
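
With DUPLICATE_CELL now part of DEFAULT_CHECK, creating a cell over the same set of faces twice is caught by default, which is what resolves the ghost-cells-on-one-processor problem from the commit message. A minimal sketch, not part of this commit, of opting back out for meshes where duplicates are intentional; it assumes the SetTopologyCheck/RemTopologyCheck accessors set and clear bits of the active check mask:

#include "inmost.h"
using namespace INMOST;

int main(int argc, char ** argv)
{
	Mesh m;
	m.RemTopologyCheck(DUPLICATE_CELL); // clear the check added to DEFAULT_CHECK
	m.SetTopologyCheck(PRINT_NOTIFY);   // keep reporting the other violations
	if( argc > 1 ) m.Load(argv[1]);
	return 0;
}
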
......@@ -9,15 +9,15 @@
#define DEFAULT_ADDITIVE_SCHWARTZ_OVERLAP 2
#define DEFAULT_ABSOLUTE_TOLERANCE 1.0e-5
#define DEFAULT_RELATIVE_TOLERANCE 1.0e-12
#define DEFAULT_DIVERGENCE_TOLERANCE 1.0e+20
#define DEFAULT_DIVERGENCE_TOLERANCE 1.0e+100
#define DEFAULT_MAXIMUM_ITERATIONS 2500
#define DEFAULT_SOLVER_GMRES_SUBSTEPS 2
#define DEFAULT_PRECONDITIONER_DROP_TOLERANCE 0.001
#define DEFAULT_PRECONDITIONER_REUSE_TOLERANCE 0.00001
#define DEFAULT_SOLVER_GMRES_SUBSTEPS 3
#define DEFAULT_PRECONDITIONER_DROP_TOLERANCE 0.0005
#define DEFAULT_PRECONDITIONER_REUSE_TOLERANCE 0.000001
#define DEFAULT_PRECONDITIONER_FILL_LEVEL 3
#define DEFAULT_PRECONDITIONER_DDPQ_TOLERANCE 0.75
#define DEFAULT_PRECONDITIONER_DDPQ_TOLERANCE 0.9
#define DEFAULT_PRECONDITIONER_REORDER_NONZEROS 0
#define DEFAULT_PRECONDITIONER_RESCALE_ITERS 5
#define DEFAULT_PRECONDITIONER_RESCALE_ITERS 8
#define DEFAULT_PRECONDITIONER_CONDITION_ESTIMATION 1
#define DEFAULT_PRECONDITIONER_ADAPT_DDPQ_TOLERANCE 1
......
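
The retuned values above are only defaults; they can still be overridden through the top-level names accepted by Solver::SetParameter*, which store presets that are applied during SetMatrix. A hedged sketch follows: SetParameterEnum is assumed as the integral-type counterpart of the SetParameterReal shown further down, and the parameter names are the ones visible in the solver.cpp hunks:

#include "inmost.h"
#include <string>
using namespace INMOST;

int main(int argc, char ** argv)
{
	if( argc < 2 ) return -1;
	Solver::Initialize(&argc,&argv,NULL);
	{
		Solver::Matrix A("A");
		A.Load(std::string(argv[1]));
		Solver s(Solver::INNER_MLILUC);
		s.SetParameterEnum("gmres_substeps",2);    // pre-commit value; new default is 3
		s.SetParameterReal("ddpq_tolerance",0.75); // pre-commit value; new default is 0.9
		s.SetMatrix(A);                            // presets take effect here
	}
	Solver::Finalize();
	return 0;
}
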
......@@ -1355,10 +1355,13 @@ ecl_exit_loop:
}
}
if( find == -1 )
{
newnodes[i] = CreateNode(coords)->GetHandle();
SetMarker(newnodes[i],unused_marker);
}
else
newnodes[i] = old_nodes[find];
SetMarker(newnodes[i],unused_marker);
if( verbosity > 1 && i%report_pace == 0)
{
printf("nodes %3.1f%%\r",(i*100.0)/(1.0*npoints));
......@@ -1381,17 +1384,26 @@ ecl_exit_loop:
int find = -1;
if( !old_nodes.empty() )
{
REPORT_VAL("look up",coords[0] << " " << coords[1] << " " << coords[2]);
std::vector<HandleType>::iterator it = std::lower_bound(old_nodes.begin(),old_nodes.end(),coords,CentroidComparator(this));
if( it != old_nodes.end() )
{
Storage::real_array c = RealArrayDF(*it,CoordsTag());
REPORT_VAL("found",c[0] << " " << c[1] << " " << c[2]);
if( CentroidComparator(this).Compare(c.data(),coords) == 0 )
{
find = static_cast<int>(it - old_nodes.begin());
REPORT_VAL("match",find << " " << old_nodes[find]);
}
}
}
if( find == -1 ) newnodes[i] = CreateNode(coords)->GetHandle();
if( find == -1 )
{
newnodes[i] = CreateNode(coords)->GetHandle();
SetMarker(newnodes[i],unused_marker);
}
else newnodes[i] = old_nodes[find];
SetMarker(newnodes[i],unused_marker);
if( verbosity > 1 && i%report_pace == 0)
{
printf("nodes %3.1f%%\r",(i*100.0)/(1.0*npoints));
......
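
The two hunks above share one pattern: previously created nodes are kept sorted by centroid, std::lower_bound proposes a candidate, and a fresh node is created (and tagged with unused_marker) only when no exact match exists. The fix moves SetMarker inside the creation branch so preexisting nodes no longer receive the marker and are not swept away later. A self-contained sketch of the pattern, with plain coordinate triples standing in for INMOST handles and CentroidComparator:

#include <algorithm>
#include <array>
#include <cstdio>
#include <vector>

typedef std::array<double,3> Coords;

int main()
{
	// Coordinates of preexisting nodes, kept sorted (lexicographically here).
	std::vector<Coords> old_nodes;
	old_nodes.push_back(Coords{{0,0,0}});
	old_nodes.push_back(Coords{{1,0,0}});
	old_nodes.push_back(Coords{{1,1,0}});
	// Incoming points: the first matches an old node, the second does not.
	std::vector<Coords> wanted;
	wanted.push_back(Coords{{1,0,0}});
	wanted.push_back(Coords{{2,0,0}});
	for(size_t i = 0; i < wanted.size(); ++i)
	{
		std::vector<Coords>::iterator it =
			std::lower_bound(old_nodes.begin(),old_nodes.end(),wanted[i]);
		if( it != old_nodes.end() && *it == wanted[i] )
			std::printf("reuse node %d\n",(int)(it - old_nodes.begin()));
		else
			std::printf("create node (only this one would get unused_marker)\n");
	}
	return 0;
}
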
......@@ -3883,6 +3883,7 @@ namespace INMOST
void Mesh::ExchangeGhost(Storage::integer layers, ElementType bridge)
{
//printf("called exchange ghost with %d bridge %s\n",layers,ElementTypeName(bridge));
if( m_state == Serial ) return;
ENTER_FUNC();
#if defined(USE_MPI)
......
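
The added guard turns ExchangeGhost into a no-op for a mesh that was never distributed. A minimal sketch of the now-safe call pattern; loading from a command-line file is assumed, and the mesh is presumed to remain in the Serial state:

#include "inmost.h"
using namespace INMOST;

int main(int argc, char ** argv)
{
	Mesh m;
	if( argc > 1 ) m.Load(argv[1]); // serial load; the mesh state stays Serial
	m.ExchangeGhost(1,FACE);        // with the guard above this returns at once
	return 0;
}
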
......@@ -1252,10 +1252,12 @@ namespace INMOST
solver_gmres_substeps = val;
else if( name == "reorder_nonzeros" )
preconditioner_reorder_nonzero = val;
/* Block below leads to confusion - parameters would be reset by preset values during setup
else if( _pack == INNER_ILU2 || _pack == INNER_MLILUC )
{
try
{
//This leads to confusion -
IterativeMethod * method = (IterativeMethod *)solver_data;
if (name[0] == ':')
method->EnumParameter(name.substr(1, name.size() - 1)) = val;
......@@ -1267,6 +1269,7 @@ namespace INMOST
std::cout << "Parameter " << name << " of intergral type is unknown" << std::endl;
}
}
*/
else std::cout << "Parameter " << name << " of integral type is unknown" << std::endl;
}
void Solver::SetParameterReal(std::string name, INMOST_DATA_REAL_TYPE val)
......@@ -1285,6 +1288,7 @@ namespace INMOST
preconditioner_ddpq_tolerance = val;
else if( name == "fill_level" )
preconditioner_fill_level = val;
/* Block below leads to confusion - parameters would be reset by preset values during setup
else if( _pack == INNER_ILU2 || _pack == INNER_MLILUC )
{
try
......@@ -1298,6 +1302,7 @@ namespace INMOST
std::cout << "Parameter " << name << " of real type is unknown" << std::endl;
}
}
*/
else std::cout << "Parameter " << name << " of real type is unknown" << std::endl;
}
Solver::Solver(Type pack, std::string _name, INMOST_MPI_Comm _comm)
......
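
The disabled branches are the source of the confusion named in the commit message: a value written straight into the inner IterativeMethod was silently overwritten when SetMatrix re-applied the stored presets. A short sketch of the pitfall and the remaining supported path; the ":tau" and "drop_tolerance" names are illustrative assumptions, not confirmed by the hunks above:

#include "inmost.h"
#include <string>
using namespace INMOST;

int main(int argc, char ** argv)
{
	if( argc < 2 ) return -1;
	Solver::Initialize(&argc,&argv,NULL);
	{
		Solver s(Solver::INNER_MLILUC);
		// Pre-commit, a colon-prefixed name was forwarded to the inner
		// IterativeMethod at once and then reset inside SetMatrix:
		//s.SetParameterReal(":tau",1.0e-4); // removed path, shown for contrast
		// Remaining path: top-level names are stored and applied at setup.
		s.SetParameterReal("drop_tolerance",1.0e-4); // name assumed, for illustration
		Solver::Matrix A("A");
		A.Load(std::string(argv[1]));
		s.SetMatrix(A); // the inner solver is configured here from the presets
	}
	Solver::Finalize();
	return 0;
}
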
if(USE_SOLVER)
add_subdirectory(solver_test000)
#add_subdirectory(solver_test001)
endif(USE_SOLVER)
if(USE_MESH AND USE_MPI)
......
project(solver_test001)
set(SOURCE main.cpp)
add_executable(solver_test001 ${SOURCE})
target_link_libraries(solver_test001 inmost)
if(USE_MPI)
message("linking solver_test001 with MPI")
target_link_libraries(solver_test001 ${MPI_LIBRARIES})
if(MPI_LINK_FLAGS)
set_target_properties(solver_test001 PROPERTIES LINK_FLAGS "${MPI_LINK_FLAGS}")
endif()
endif(USE_MPI)
if(USE_SOLVER_ANI)
message("linking solver_test001 with ani3d")
target_link_libraries(solver_test001 ani3d)
endif()
if(USE_SOLVER_PETSC)
message("linking solver_test001 with PETSc")
target_link_libraries(solver_test001 ${PETSC_LIBRARIES})
endif()
if(USE_SOLVER_TRILINOS)
message("linking solver_test001 with Trilinos")
target_link_libraries(solver_test001 ${Trilinos_LIBRARIES} ${Trilinos_TPL_LIBRARIES})
endif()
set(TOKAMAK_TESTS utm300
utm1700a
utm1700b
utm3060
utm5940)
foreach(test ${TOKAMAK_TESTS})
add_test(NAME solver_test001_sparsekit_tokamak_${test} COMMAND $<TARGET_FILE:solver_test001> ${CMAKE_CURRENT_SOURCE_DIR}/matrices/sparskit/tokamak/${test}.mtx ${CMAKE_CURRENT_SOURCE_DIR}/matrices/sparskit/tokamak/${test}_rhs1.mtx)
endforeach()
set(DRIVCAV_OLD_TESTS cavity01
cavity02
cavity03
cavity04
cavity05
cavity06
cavity07
cavity08
cavity09
cavity10
cavity11
cavity12
cavity13
cavity14
cavity15
cavity16
cavity17
cavity18
cavity19
cavity20
cavity21
cavity22
cavity23
cavity24
cavity25
cavity26)
foreach(test ${DRIVCAV_OLD_TESTS})
add_test(NAME solver_test001_sparsekit_drivcav_old_${test} COMMAND $<TARGET_FILE:solver_test001> ${CMAKE_CURRENT_SOURCE_DIR}/matrices/sparskit/drivcav_old/${test}.mtx ${CMAKE_CURRENT_SOURCE_DIR}/matrices/sparskit/drivcav_old/${test}_rhs1.mtx)
endforeach()
set(FIDAP_TESTS fidap001
fidap002
fidap003
fidap004
fidap005
fidap006
fidap007
fidap008
fidap009
fidap010
fidap011
fidap012
fidap013
fidap014
fidap015
fidap018
fidap019
fidap020
fidap021
fidap022
fidap023
fidap024
fidap025
fidap026
fidap027
fidap028
fidap029
fidap031
fidap032
fidap033
fidap035
fidap036
fidap037
fidapm02
fidapm03
fidapm05
fidapm07
fidapm08
fidapm09
fidapm10
fidapm11
fidapm13
fidapm15
fidapm29
fidapm33
fidapm37)
foreach(test ${FIDAP_TESTS})
add_test(NAME solver_test001_sparsekit_fidap_${test} COMMAND $<TARGET_FILE:solver_test001> ${CMAKE_CURRENT_SOURCE_DIR}/matrices/sparskit/fidap/${test}.mtx ${CMAKE_CURRENT_SOURCE_DIR}/matrices/sparskit/fidap/${test}_rhs1.mtx)
endforeach()
set(DRIVCAV_TESTS e05r0000
e05r0100
e05r0200
e05r0300
e05r0400
e05r0500
e20r0000
e20r0100
e20r0500
e20r1000
e20r2000
e20r3000
e20r4000
e20r5000
e30r0000
e30r0100
e30r0500
e30r1000
e30r2000
e30r3000
e30r4000
e30r5000
e40r0000
e40r0100
e40r0500
e40r1000
e40r2000
e40r3000
e40r4000
e40r5000)
foreach(test ${DRIVCAV_TESTS})
# add_test(NAME solver_test001_sparsekit_drivcav_${test} COMMAND $<TARGET_FILE:solver_test001> ${CMAKE_CURRENT_SOURCE_DIR}/matrices/sparskit/drivcav/${test}.mtx ${CMAKE_CURRENT_SOURCE_DIR}/matrices/sparskit/drivcav/${test}_rhs1.mtx)
endforeach()
\ No newline at end of file
#include <string>
#include <iostream>
#include "../../inmost.h"
using namespace INMOST;
#if defined(USE_MPI)
#define BARRIER MPI_Barrier(MPI_COMM_WORLD);
#else
#define BARRIER
#endif
int main(int argc, char ** argv)
{
int rank,procs;
if( argc < 2 )
{
std::cout << "Usage: " << argv[0] << " matrix.mtx [right_hand_side.rhs]" << std::endl;
return -1;
}
Solver::Type type = Solver::INNER_MLILUC;
Solver::Initialize(&argc,&argv,NULL); // Initialize the linear solver in accordance with args
{
#if defined(USE_MPI)
MPI_Comm_rank(MPI_COMM_WORLD,&rank); // Get the rank of the current process
MPI_Comm_size(MPI_COMM_WORLD,&procs); // Get the total number of processors used
#else
rank = 0;
procs = 1;
#endif
//std::cout << rank << "/" << procs << " " << argv[0] << std::endl;
Solver::Matrix mat("A"); // Declare the matrix of the linear system to be solved
Solver::Vector b("rhs"); // Declare the right-hand side vector
Solver::Vector x("sol"); // Declare the solution vector
//std::cout << rank << " load matrix from " << std::string(argv[1]) << " ..." << std::endl;
double t = Timer(), tt = Timer();
mat.Load(std::string(argv[1])); //if interval parameters not set, matrix will be divided automatically
BARRIER
if( !rank ) std::cout << "load matrix: " << Timer() - t << std::endl;
//mat.Save("test.mtx");
t = Timer();
if( argc > 2 )
{
//std::cout << rank << " load vector from " << std::string(argv[2]) << std::endl;
b.Load(std::string(argv[2])); // Load RHS vector
}
else // Set local RHS to 1 if it was not specified
{
INMOST_DATA_ENUM_TYPE mbeg,mend,k;
mat.GetInterval(mbeg,mend);
b.SetInterval(mbeg,mend);
for( k = mbeg; k < mend; ++k ) b[k] = 1.0;
}
BARRIER
if( !rank ) std::cout << "load vector: " << Timer() - t << std::endl;
bool success = false;
int iters;
std::string reason;
double resid, realresid = 0;
{
Solver s(type); // Declare the linear solver by specified type
t = Timer();
s.SetMatrix(mat); // Compute the preconditioner for the original matrix
BARRIER
if( !rank ) std::cout << "preconditioner: " << Timer() - t << std::endl;
t = Timer();
success = s.Solve(b,x); // Solve the linear system with the previously computed preconditioner
BARRIER
if( !rank ) std::cout << "solver: " << Timer() - t << std::endl;
iters = s.Iterations(); // Get the number of iterations performed
resid = s.Residual(); // Get the final residual achieved
reason = s.GetReason();
//x.Save("output.rhs");
}
tt = Timer() - tt;
{ // Compute the true residual
double aresid = 0, bresid = 0;
Solver::Vector test;
t = Timer();
Solver::OrderInfo info;
info.PrepareMatrix(mat,0);
info.PrepareVector(x);
info.Update(x);
mat.MatVec(1.0,x,0.0,test); // Multiply the original matrix by a vector
{
INMOST_DATA_ENUM_TYPE mbeg,mend,k;
info.GetLocalRegion(info.GetRank(),mbeg,mend);
for( k = mbeg; k < mend; ++k )
{
aresid += (test[k]-b[k])*(test[k]-b[k]);
bresid += b[k]*b[k];
}
}
double temp[2] = {aresid,bresid}, recv[2] = {aresid,bresid};
#if defined(USE_MPI)
MPI_Reduce(temp,recv,2,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
if( info.GetRank() == 0 ) std::cout << "||Ax-b|| " << sqrt(recv[0]) << " ||b|| " << sqrt(recv[1]) << " ||Ax-b||/||b|| " << sqrt(recv[0]/recv[1]) << std::endl;
#endif
realresid = sqrt(recv[0]/recv[1]);
//realresid = sqrt(realresid);
info.RestoreVector(x);
if( !rank ) std::cout << "norms: " << Timer() - t << std::endl;
}
{
if( rank == 0 )
{
std::cout << procs << " processors";
if( success )
{
std::cout << " solved in " << tt << " secs";
std::cout << " with " << iters << " iterations to " << resid << " norm" << std::endl;
}
else std::cout << " failed to solve" << std::endl;
std::cout << " matrix \"" << argv[1] << "\"" << std::endl;
if( argc > 2 ) std::cout << " vector \"" << argv[2] << "\"" << std::endl;
else std::cout << " unit right hand side" << std::endl;
std::cout << " true residual ||Ax-b||/||b|| " << realresid << std::endl;;
std::cout << "converged reason: " << reason << std::endl;
}
}
if( !success || (success && realresid > 1.0e-3) )
{
#if defined(USE_MPI)
MPI_Abort(MPI_COMM_WORLD,-1);
#else
exit(-1);
#endif
}
}
Solver::Finalize(); // Finalize solver and close MPI activity
return 0;
}