Commit 601f715a authored by Kirill Nikitin

Examples: added GridGen and FVDiscr examples

Added GridGen – a simple parallel mesh generator
Added FVDiscr – a simple two-point FVM scheme
parent 8228d5ef
@@ -2,5 +2,7 @@ add_subdirectory(DrawGrid)
add_subdirectory(OldDrawGrid)
add_subdirectory(DrawMatrix)
add_subdirectory(MatSolve)
add_subdirectory(GridGen)
add_subdirectory(FVDiscr)
add_subdirectory(OctreeCutcell)
add_subdirectory(Solver)
project(FVDiscr)
set(SOURCE main.cpp)
add_executable(FVDiscr ${SOURCE})
target_link_libraries(FVDiscr inmost)
if(USE_SOLVER_ANI)
message("linking FVDiscr with ani3d")
target_link_libraries(FVDiscr ani3d)
endif()
if(USE_SOLVER_PETSC)
message("linking FVDiscr with PETSc")
link_directories(${PETSC_LIBRARY_DIRS})
target_link_libraries(FVDiscr ${PETSC_LIBRARIES})
endif()
if(USE_PARTITIONER)
if(USE_PARTITIONER_ZOLTAN)
target_link_libraries(FVDiscr ${ZOLTAN_LIBRARIES})
endif()
if(USE_PARTITIONER_PARMETIS)
target_link_libraries(FVDiscr ${PARMETIS_LIBRARIES})
endif()
endif()
if(USE_MPI)
message("linking FVDiscr with MPI")
target_link_libraries(FVDiscr ${MPI_LIBRARIES})
if(MPI_LINK_FLAGS)
set_target_properties(FVDiscr PROPERTIES LINK_FLAGS "${MPI_LINK_FLAGS}")
endif()
endif(USE_MPI)
//g++ main.cpp -lpetsc -L/usr/X11R6/lib -lX11 -ldmumps -lmumps_common -lmpi_f77 -lscalapack -lpord -lblacs -lparmetis -lmetis -lmpi -lHYPRE -lgfortran -lblas -llapack ../../INMOST.a
//mpicxx main.cpp -lpetsc -L/usr/X11R6/lib -lX11 -ldmumps -lmumps_common -lmpi_f77 -lscalapack -lpord -lblacs -lparmetis -lmetis -lmpi -lHYPRE -lgfortran -lblas -llapack ../../INMOST.a -lzoltan -lparmetis -lmetis
// run ./a.out grids/rezultMesh.vtk MATERIALS -ksp_monitor -ksp_view -mat_type aij -vec_type standard
#include "../../inmost.h"
#include <stdio.h>
#ifndef M_PI
#define M_PI 3.141592653589
#endif
using namespace INMOST;
#if defined(USE_MPI)
#define BARRIER MPI_Barrier(MPI_COMM_WORLD);
#else
#define BARRIER
#endif
void make_vec(Storage::real v1[3], Storage::real v2[3], Storage::real out[3])
{
out[0] = v1[0] - v2[0];
out[1] = v1[1] - v2[1];
out[2] = v1[2] - v2[2];
}
Storage::real dot_prod(Storage::real v1[3], Storage::real v2[3])
{
return v1[0]*v2[0]+v1[1]*v2[1]+v1[2]*v2[2];
}
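// Two-point transmissibility kernel: projection of the permeability-scaled
// face normal K*n onto the vector between the cell centers.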
Storage::real transmissibility(Storage::real vec[3], Storage::real K, Storage::real normal_face[3])
{
Storage::real Kn[3];
Kn[0] = K * normal_face[0], Kn[1] = K * normal_face[1], Kn[2] = K * normal_face[2];
return dot_prod(vec,Kn);
}
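// Exact solution u(x) = sin(pi x) sin(pi y) sin(pi z); also used as the Dirichlet boundary value.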
Storage::real func(Storage::real x[3], Storage::real tmp)
{
(void) tmp;
// return x[0] + 2 * x[1] + 3 * x[2];
return sin (M_PI * x[0]) * sin (M_PI * x[1]) * sin (M_PI * x[2]);
}
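// Right-hand side f = div(K grad u) = -3 K pi^2 u for the exact solution above;
// tmp carries the (constant) cell permeability K.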
Storage::real func_rhs(Storage::real x[3], Storage::real tmp)
{
// return 0;
return -3 * tmp * M_PI * M_PI * sin (M_PI * x[0]) * sin (M_PI * x[1]) * sin (M_PI * x[2]);
}
int main(int argc,char ** argv)
{
Solver::Initialize(&argc,&argv,"");
#if defined(USE_PARTITIONER)
Partitioner::Initialize(&argc,&argv);
#endif
if( argc > 1 )
{
Tag phi, tensor_K, id;
Mesh * m = new Mesh();
double ttt = Timer();
bool repartition = false;
m->SetCommunicator(INMOST_MPI_COMM_WORLD);
if( m->GetProcessorRank() == 0 ) std::cout << argv[0] << std::endl;
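// Parallel file formats are read collectively by all processors; otherwise the
// whole mesh is loaded on processor 0 and redistributed below (when the partitioner is enabled).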
if( m->isParallelFileFormat(argv[1]) )
{
m->Load(argv[1]);
repartition = true;
}
else
{
if( m->GetProcessorRank() == 0 )
m->Load(argv[1]);
}
BARRIER
if( m->GetProcessorRank() == 0 ) std::cout << "Processors: " << m->GetProcessorsNumber() << std::endl;
if( m->GetProcessorRank() == 0 ) std::cout << "Load(MPI_File): " << Timer()-ttt << std::endl;
//~ double ttt2 = Timer();
//~ Mesh t;
//~ t.SetCommunicator(INMOST_MPI_COMM_WORLD);
//~ t.SetParallelFileStrategy(0);
//~ t.Load(argv[1]);
//~ BARRIER
//~ if( m->GetProcessorRank() == 0 ) std::cout << "Load(MPI_Scatter): " << Timer()-ttt2 << std::endl;
#if defined(USE_PARTITIONER)
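// Compute a cell partition with the built-in (Inner_RCM) method and
// move elements to their new owner processors.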
ttt = Timer();
Partitioner * p = new Partitioner(m);
p->SetMethod(Partitioner::Inner_RCM,Partitioner::Partition);
p->Evaluate();
delete p;
BARRIER
if( m->GetProcessorRank() == 0 ) std::cout << "Evaluate: " << Timer()-ttt << std::endl;
ttt = Timer();
m->Redistribute();
m->ReorderEmpty(CELL|FACE|EDGE|NODE);
BARRIER
if( m->GetProcessorRank() == 0 ) std::cout << "Redistribute: " << Timer()-ttt << std::endl;
#endif
ttt = Timer();
m->AssignGlobalID(CELL | EDGE | FACE | NODE);
BARRIER
if( m->GetProcessorRank() == 0 ) std::cout << "Assign id: " << Timer()-ttt << std::endl;
id = m->GlobalIDTag();
phi = m->CreateTag("Solution",DATA_REAL,CELL,NONE,1);
tensor_K = m->CreateTag("K",DATA_REAL,CELL,NONE,1);
for(Mesh::iteratorCell cell = m->BeginCell(); cell != m->EndCell(); ++cell)
if( cell->GetStatus() != Element::Ghost )
cell->Real(tensor_K) = 1.0;
ttt = Timer();
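// Create one layer of ghost cells across inter-processor faces so that
// both cells of every owned face are available during assembly.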
m->ExchangeGhost(1,FACE);
BARRIER
if( m->GetProcessorRank() == 0 ) std::cout << "Exchange ghost: " << Timer()-ttt << std::endl;
ttt = Timer();
Solver S(Solver::INNER_ILU2);
Solver::Matrix A;
Solver::Vector x,b;
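// Request precomputed geometric data (areas/volumes, centroids, barycenters,
// face normals and orientation) used by the assembly loop below.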
std::map<GeometricData,ElementType> table;
table[MEASURE] = CELL | FACE;
table[CENTROID] = CELL | FACE;
table[BARYCENTER] = CELL | FACE;
table[NORMAL] = FACE;
table[ORIENTATION] = FACE;
m->PrepareGeometricData(table);
//~ BARRIER
//~ if( m->GetProcessorRank() == 0 ) std::cout << "Prepare geometric data: " << Timer()-ttt << std::endl;
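// Matrix and vectors are distributed by global cell ID: find the range
// of IDs owned by this processor.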
unsigned idmax = 0, idmin = UINT_MAX;
for(Mesh::iteratorCell cell = m->BeginCell(); cell != m->EndCell(); ++cell)
if( cell->GetStatus() != Element::Ghost )
{
unsigned pid = cell->Integer(id);
unsigned pid2 = cell->GlobalID();
if( pid < idmin ) idmin = pid;
if( pid+1 > idmax ) idmax = pid+1;
}
A.SetInterval(idmin,idmax);
x.SetInterval(idmin,idmax);
b.SetInterval(idmin,idmax);
//~ std::cout << m->GetProcessorRank() << " A,x,b interval " << idmin << ":" << idmax << " size " << idmax-idmin << std::endl;
//solve \nabla \cdot \nabla phi = f equation
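// Two-point flux approximation: for each interior face the flux is
// T * (phi_2 - phi_1) with T = K_harmonic * |F| * (d . n) / (d . d),
// where d is the vector between the two cell barycenters and n the unit face normal.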
for(Mesh::iteratorFace face = m->BeginFace(); face != m->EndFace(); ++face)
{
//~ std::cout << face->LocalID() << " / " << m->NumberOfFaces() << std::endl;
Element::Status s1,s2;
Cell * r1 = face->BackCell();
Cell * r2 = face->FrontCell();
if( ((r1 == NULL || (s1 = r1->GetStatus()) == Element::Ghost)?0:1)+
((r2 == NULL || (s2 = r2->GetStatus()) == Element::Ghost)?0:1) == 0) continue;
Storage::real f_nrm[3], r1_cnt[3], r2_cnt[3], f_cnt[3], d1[3], Coef;
Storage::real f_area = face->Area();
Storage::real vol1 = r1->Volume(), vol2;
Storage::integer id1 = r1->Integer(id), id2;
Storage::real K1 = r1->Real(tensor_K), K2, Kav;
face->Normal(f_nrm);
f_nrm[0] /= f_area;
f_nrm[1] /= f_area;
f_nrm[2] /= f_area;
r1->Barycenter(r1_cnt);
face->Barycenter(f_cnt);
if( r2 == NULL ) //boundary condition
{
Storage::real bnd_pnt[3], dist;
make_vec(f_cnt,r1_cnt,d1);
dist = dot_prod(f_nrm,d1) / dot_prod(f_nrm,f_nrm);
// bnd_pnt is a projection of the cell center to the face
bnd_pnt[0] = r1_cnt[0] + dist * f_nrm[0],
bnd_pnt[1] = r1_cnt[1] + dist * f_nrm[1],
bnd_pnt[2] = r1_cnt[2] + dist * f_nrm[2];
Coef = K1 * f_area / dist;
A[id1][id1] += -Coef;
b[id1] += -Coef * func(bnd_pnt, 0);
}
else
{
vol2 = r2->Volume();
K2 = r2->Real(tensor_K);
id2 = r2->Integer(id);
r2->Barycenter(r2_cnt);
// Kav = 0.5 * ( K1 + K2 ); // Arithmetic mean
Kav = 2.0 / ( 1.0 / K1 + 1.0 / K2 ); // Harmonic mean
make_vec(r2_cnt,r1_cnt,d1);
Coef = transmissibility(d1,Kav,f_nrm)/dot_prod(d1,d1) * f_area;
if( s1 != Element::Ghost )
{
A[id1][id1] += -Coef;
A[id1][id2] += Coef;
}
if( s2 != Element::Ghost )
{
A[id2][id1] += Coef;
A[id2][id2] += -Coef;
}
}
}
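// Volume-integrated source term: Mean() averages func_rhs over each owned cell.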
for(Mesh::iteratorCell cell = m->BeginCell(); cell != m->EndCell(); ++cell)
if( cell->GetStatus() != Element::Ghost )
b[cell->Integer(id)] += cell->Mean(func_rhs, cell->Real(tensor_K)) * cell->Volume();
BARRIER
if( m->GetProcessorRank() == 0 ) std::cout << "Matrix assemble: " << Timer()-ttt << std::endl;
m->RemoveGeometricData(table);
//~ ttt = Timer();
//~ A.Save("A.mtx");
//~ b.Save("b.rhs");
//~ BARRIER
//~ if( m->GetProcessorRank() == 0 ) std::cout << "Save matrix and RHS: " << Timer()-ttt << std::endl;
ttt = Timer();
S.SetMatrix(A);
S.Solve(b,x);
BARRIER
if( m->GetProcessorRank() == 0 ) std::cout << "Solve system: " << Timer()-ttt << std::endl;
ttt = Timer();
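// Compare with the exact solution: maximum (C-norm) and L2-norm errors,
// reduced over all processors below.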
Storage::real err_C = 0.0, err_L2 = 0.0;
for(Mesh::iteratorCell cell = m->BeginCell(); cell != m->EndCell(); cell++)
if( cell->GetStatus() != Element::Ghost )
{
Storage::real exact = cell->Mean(func, 0);
Storage::real err = fabs (x[cell->Integer(id)] - exact);
if (err > err_C)
err_C = err;
err_L2 += err * err * cell->Volume();
// x[cell->Integer(id)] = err;
}
err_C = m->AggregateMax(err_C);
err_L2 = sqrt(m->Integrate(err_L2));
if( m->GetProcessorRank() == 0 ) std::cout << "err_C = " << err_C << std::endl;
if( m->GetProcessorRank() == 0 ) std::cout << "err_L2 = " << err_L2 << std::endl;
BARRIER
if( m->GetProcessorRank() == 0 ) std::cout << "Compute residual: " << Timer()-ttt << std::endl;
ttt = Timer();
for(Mesh::iteratorCell cell = m->BeginCell(); cell != m->EndCell(); cell++)
if( cell->GetStatus() != Element::Ghost )
cell->Real(phi) = x[cell->Integer(id)];
BARRIER
if( m->GetProcessorRank() == 0 ) std::cout << "Retrive data: " << Timer()-ttt << std::endl;
ttt = Timer();
m->ExchangeData(phi,CELL);
BARRIER
if( m->GetProcessorRank() == 0 ) std::cout << "Exchange phi: " << Timer()-ttt << std::endl;
ttt = Timer();
if (m->GetProcessorsNumber() == 1 )
m->Save("grid.vtk");
else
m->Save("grid.pvtk");
BARRIER
if( m->GetProcessorRank() == 0 ) std::cout << "Save pvtk: " << Timer()-ttt << std::endl;
delete m;
}
else
{
std::cout << argv[0] << " [mesh_file]" << std::endl;
}
#if defined(USE_PARTITIONER)
Partitioner::Finalize();
#endif
Solver::Finalize();
return 0;
}
project(GridGen)
set(SOURCE main.cpp)
add_executable(GridGen ${SOURCE})
target_link_libraries(GridGen inmost)
if(USE_MPI)
message("linking GridGen with MPI")
target_link_libraries(GridGen ${MPI_LIBRARIES})
if(MPI_LINK_FLAGS)
set_target_properties(GridGen PROPERTIES LINK_FLAGS "${MPI_LINK_FLAGS}")
endif()
endif(USE_MPI)
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#include "../../inmost.h"
#include <string.h>
using namespace INMOST;
#define MESH_SIZE 64
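// Local lexicographic index of the grid vertex (x,y,z) owned by this processor.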
#define V_ID(x, y, z) ((x-localstart[0])*(localsize[1]+1)*(localsize[2]+1) + (y-localstart[1])*(localsize[2]+1) + (z-localstart[2]))
Mesh * ParallelCubeGenerator(INMOST_MPI_Comm comm, int nx, int ny, int nz)
{
int procs_per_axis[3] = {1,1,1};
int sizes[3] = {nx,ny,nz};
int rank,size;
Mesh * m = new Mesh();
m->SetCommunicator(comm);
#if defined(USE_MPI)
MPI::COMM_WORLD.Set_errhandler(MPI::ERRORS_THROW_EXCEPTIONS);
#endif
rank = m->GetProcessorRank();
size = m->GetProcessorsNumber();
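// Factor the number of processors into primes and assign each factor to the
// axis that currently has the most cells per processor, yielding a 3D processor grid.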
{
int divsize = size;
std::vector<int> divs;
while( divsize > 1 )
{
for(int k = 2; k <= divsize; k++)
if( divsize % k == 0 )
{
divs.push_back(k);
divsize /= k;
break;
}
}
int elements_per_procs[3] = {sizes[0],sizes[1],sizes[2]};
for(std::vector<int>::reverse_iterator it = divs.rbegin(); it != divs.rend(); it++)
{
int * max = std::max_element(elements_per_procs+0,elements_per_procs+3);
procs_per_axis[max-elements_per_procs] *= *it;
(*max) /= *it;
}
}
//rank = proc_coords[2] * procs_per_axis[0] *procs_per_axis[1] + proc_coords[1] * procs_per_axis[0] + proc_coords[0];
int proc_coords[3] = {rank % procs_per_axis[0] , rank / procs_per_axis[0] % procs_per_axis[1], rank / (procs_per_axis[0] *procs_per_axis[1]) };
//???
int localsize[3], localstart[3], localend[3];
int avgsize[3] =
{
(int)ceil(sizes[0]/procs_per_axis[0]),
(int)ceil(sizes[1]/procs_per_axis[1]),
(int)ceil(sizes[2]/procs_per_axis[2])
};
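// Each processor owns avgsize cells per axis; the last processor along an axis
// takes whatever remains.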
for(int j = 0; j < 3; j++)
{
localstart[j] = avgsize[j] * proc_coords[j];
if( proc_coords[j] == procs_per_axis[j] - 1 )
localsize[j] = sizes[j] - avgsize[j] * (procs_per_axis[j]-1);
else localsize[j] = avgsize[j];
localend[j] = localstart[j] + localsize[j];
}
std::vector<Node *> newverts;
newverts.reserve(localsize[0]*localsize[1]*localsize[2]);
for(int i = localstart[0]; i <= localend[0]; i++)
for(int j = localstart[1]; j <= localend[1]; j++)
for(int k = localstart[2]; k <= localend[2]; k++)
{
Storage::real xyz[3];
xyz[0] = i * 1.0 / (sizes[0]);
xyz[1] = j * 1.0 / (sizes[1]);
xyz[2] = k * 1.0 / (sizes[2]);
newverts.push_back(m->CreateNode(xyz));
if( ((int)newverts.size()-1) != V_ID(i,j,k))
printf("v_id = %ld, [%d,%d,%d] = %d\n", newverts.size()-1,i,j,k, V_ID(i, j, k));
}
std::vector< std::vector<Node *> > c_f_nodes(6);
for (int i = 0; i < 6; i++)
c_f_nodes[i].resize(4);
for(int i = localstart[0]+1; i <= localend[0]; i++)
for(int j = localstart[1]+1; j <= localend[1]; j++)
for(int k = localstart[2]+1; k <= localend[2]; k++)
{
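// nvf lists the nodes of the six quadrilateral faces of a hexahedron, four per face.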
const INMOST_DATA_ENUM_TYPE nvf[24] = {0,4,6,2,1,3,7,5,0,1,5,4,2,6,7,3,0,2,3,1,4,5,7,6};
const INMOST_DATA_ENUM_TYPE numnodes[6] = { 4, 4, 4, 4, 4, 4 };
Node * verts[8];
verts[0] = newverts[V_ID(i - 1,j - 1, k - 1)];
verts[1] = newverts[V_ID(i - 0,j - 1, k - 1)];
verts[2] = newverts[V_ID(i - 1,j - 0, k - 1)];
verts[3] = newverts[V_ID(i - 0,j - 0, k - 1)];
verts[4] = newverts[V_ID(i - 1,j - 1, k - 0)];
verts[5] = newverts[V_ID(i - 0,j - 1, k - 0)];
verts[6] = newverts[V_ID(i - 1,j - 0, k - 0)];
verts[7] = newverts[V_ID(i - 0,j - 0, k - 0)];
m->CreateCell(verts,nvf,numnodes,6).first;
}
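// Establish which elements are shared between processors and create two layers
// of ghost cells connected through nodes.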
m->ResolveShared();
m->ExchangeGhost(2,NODE);
//m->Save("test.pvtk");
return m;
}
Mesh * ParallelCubePrismGenerator(INMOST_MPI_Comm comm, int nx, int ny, int nz)
{
int procs_per_axis[3] = {1,1,1};
int sizes[3] = {nx,ny,nz};
int rank,size;
Mesh * m = new Mesh();
m->SetCommunicator(comm);
#if defined(USE_MPI)
MPI::COMM_WORLD.Set_errhandler(MPI::ERRORS_THROW_EXCEPTIONS);
#endif
rank = m->GetProcessorRank();
size = m->GetProcessorsNumber();
{
int divsize = size;
std::vector<int> divs;
while( divsize > 1 )
{
for(int k = 2; k <= divsize; k++)
if( divsize % k == 0 )
{
divs.push_back(k);
divsize /= k;
break;
}
}
int elements_per_procs[3] = {sizes[0],sizes[1],sizes[2]};
for(std::vector<int>::reverse_iterator it = divs.rbegin(); it != divs.rend(); it++)
{
int * max = std::max_element(elements_per_procs+0,elements_per_procs+3);
procs_per_axis[max-elements_per_procs] *= *it;
(*max) /= *it;
}
}
//rank = proc_coords[2] * procs_per_axis[0] *procs_per_axis[1] + proc_coords[1] * procs_per_axis[0] + proc_coords[0];
int proc_coords[3] = {rank % procs_per_axis[0] , rank / procs_per_axis[0] % procs_per_axis[1], rank / (procs_per_axis[0] *procs_per_axis[1]) };
//???
int localsize[3], localstart[3], localend[3];
int avgsize[3] =
{
(int)ceil(sizes[0]/procs_per_axis[0]),
(int)ceil(sizes[1]/procs_per_axis[1]),
(int)ceil(sizes[2]/procs_per_axis[2])
};
for(int j = 0; j < 3; j++)
{
localstart[j] = avgsize[j] * proc_coords[j];
if( proc_coords[j] == procs_per_axis[j] - 1 )
localsize[j] = sizes[j] - avgsize[j] * (procs_per_axis[j]-1);
else localsize[j] = avgsize[j];
localend[j] = localstart[j] + localsize[j];
}
std::vector<Node *> newverts;
newverts.reserve(localsize[0]*localsize[1]*localsize[2]);
for(int i = localstart[0]; i <= localend[0]; i++)
for(int j = localstart[1]; j <= localend[1]; j++)
for(int k = localstart[2]; k <= localend[2]; k++)
{
Storage::real xyz[3];
xyz[0] = i * 1.0 / (sizes[0]);
xyz[1] = j * 1.0 / (sizes[1]);
xyz[2] = k * 1.0 / (sizes[2]);
newverts.push_back(m->CreateNode(xyz));
if( ((int)newverts.size()-1) != V_ID(i,j,k))
printf("v_id = %ld, [%d,%d,%d] = %d\n", newverts.size()-1,i,j,k, V_ID(i, j, k));
}
std::vector< std::vector<Node *> > c_f_nodes(5);
for (int i = 0; i < 3; i++)
c_f_nodes[i].resize(4);
for (int i = 3; i < 5; i++)
c_f_nodes[i].resize(3);
for(int i = localstart[0]+1; i <= localend[0]; i++)
for(int j = localstart[1]+1; j <= localend[1]; j++)
for(int k = localstart[2]+1; k <= localend[2]; k++)
{
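// Each hexahedral cell is split into two triangular prisms (three quads + two
// triangles each); the split direction alternates with the parity of i+j.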
const INMOST_DATA_ENUM_TYPE NE_nvf1[18] = {0,4,6,2,0,3,7,4,2,6,7,3,0,2,3,4,7,6};
const INMOST_DATA_ENUM_TYPE NE_nvf2[18] = {0,4,7,3,1,3,7,5,0,1,5,4,0,3,1,4,5,7};
const INMOST_DATA_ENUM_TYPE NE_nvf3[18] = {0,4,6,2,2,6,5,1,1,5,4,0,0,2,1,4,5,6};
const INMOST_DATA_ENUM_TYPE NE_nvf4[18] = {1,5,6,2,1,3,7,5,7,3,2,6,1,2,3,6,5,7};
const INMOST_DATA_ENUM_TYPE numnodes[5] = {4,4,4,3,3};
Node * verts[8];
verts[0] = newverts[V_ID(i - 1,j - 1, k - 1)];
verts[1] = newverts[V_ID(i - 0,j - 1, k - 1)];
verts[2] = newverts[V_ID(i - 1,j - 0, k - 1)];
verts[3] = newverts[V_ID(i - 0,j - 0, k - 1)];
verts[4] = newverts[V_ID(i - 1,j - 1, k - 0)];
verts[5] = newverts[V_ID(i - 0,j - 1, k - 0)];
verts[6] = newverts[V_ID(i - 1,j - 0, k - 0)];
verts[7] = newverts[V_ID(i - 0,j - 0, k - 0)];
if ((i + j) % 2 == 0)
{
m->CreateCell(verts,NE_nvf1,numnodes,5).first;
m->CreateCell(verts,NE_nvf2,numnodes,5).first;
}
else
{
m->CreateCell(verts,NE_nvf3,numnodes,5).first;
m->CreateCell(verts,NE_nvf4,numnodes,5).first;
}
}
m->ResolveShared();
m->ExchangeGhost(2,NODE);
//m->Save("test.pvtk");
return m;
}
/****************************************************************************/
int main(int argc, char *argv[])
{
Mesh * mesh;
Mesh::Initialize(&argc,&argv);
double tt = Timer();
int nx = MESH_SIZE, ny = MESH_SIZE, nz = 1;
bool default_grid = true;
if( argc > 3 )
{
nx = atoi(argv[1]);
ny = atoi(argv[2]);
nz = atoi(argv[3]);
default_grid = false;
}
// mesh = ParallelCubeGenerator(INMOST_MPI_COMM_WORLD,nx,ny,nz);
mesh = ParallelCubePrismGenerator(INMOST_MPI_COMM_WORLD,nx,ny,nz);
// report the default grid size once the mesh (and hence the processor rank) is available
if( default_grid && mesh->GetProcessorRank() == 0 ) std::cout << "Default grid: " << nx << " x " << ny << " x " << nz << std::endl;
tt = Timer() - tt;
tt = mesh->Integrate(tt)/mesh->GetProcessorsNumber();
if( mesh->GetProcessorRank() == 0 ) std::cout << "Gen time: " << tt << std::endl;
if( mesh->GetProcessorRank() == 0 ) std::cout << "Procs: " <<mesh->GetProcessorsNumber() << std::endl;
if (mesh->GetProcessorsNumber() == 1 )
mesh->Save("grid.vtk");
else
mesh->Save("grid.pvtk");
#if defined(USE_MPI)
MPI_Barrier(mesh->GetCommunicator());
#endif
if( mesh->GetProcessorRank() == 0 )
{
// printf ("Initialization time: %.2f sec. Time step = %.2f\nComputation time: %.2f sec, total iterations = %d (linear = %d)\n",
// tinit, timestep, tcomp, totnit, totlit);
}
delete mesh;
Mesh::Finalize();
return 0;
}