Commit 5da918e7 authored by Igor Konshin

Examples: clean up code

Added code comments, small fixes and improvements.
parent 868e1316
//g++ main.cpp -lpetsc -L/usr/X11R6/lib -lX11 -ldmumps -lmumps_common -lmpi_f77 -lscalapack -lpord -lblacs -lparmetis -lmetis -lmpi -lHYPRE -lgfortran -lblas -llapack ../../INMOST.a
//mpicxx main.cpp -lpetsc -L/usr/X11R6/lib -lX11 -ldmumps -lmumps_common -lmpi_f77 -lscalapack -lpord -lblacs -lparmetis -lmetis -lmpi -lHYPRE -lgfortran -lblas -llapack ../../INMOST.a -lzoltan -lparmetis -lmetis
// run ./a.out grids/rezultMesh.vtk MATERIALS -ksp_monitor -ksp_view -mat_type aij -vec_type standard
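// This example assembles a cell-centered finite volume discretization of a diffusion
// problem for the scalar unknown phi with a per-cell coefficient K, solves the resulting
// linear system in parallel, and compares the result with the analytic functions func/func_rhs.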
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include "../../inmost.h"
using namespace INMOST;
#ifndef M_PI
#define M_PI 3.141592653589
#endif
#if defined(USE_MPI)
#define BARRIER MPI_Barrier(MPI_COMM_WORLD);
@@ -49,28 +50,29 @@ Storage::real func_rhs(Storage::real x[3], Storage::real tmp)
int main(int argc,char ** argv)
{
Solver::Initialize(&argc,&argv,""); // Initialize the solver and MPI activity
#if defined(USE_PARTITIONER)
Partitioner::Initialize(&argc,&argv); // Initialize the partitioner activity
#endif
if( argc > 1 )
{
Tag phi, tensor_K, id;
Mesh * m = new Mesh(); // Create an empty mesh
double ttt = Timer();
bool repartition = false;
m->SetCommunicator(INMOST_MPI_COMM_WORLD); // Set the MPI communicator for the mesh
if( m->GetProcessorRank() == 0 ) // If the current process is the master one
std::cout << argv[0] << std::endl;
if( m->isParallelFileFormat(argv[1]) )
{
m->Load(argv[1]); // Load mesh from the parallel file format
repartition = true;
}
else
{
if( m->GetProcessorRank() == 0 )
m->Load(argv[1]); // Load mesh from the serial file format
}
BARRIER
@@ -88,16 +90,16 @@ int main(int argc,char ** argv)
#if defined(USE_PARTITIONER)
ttt = Timer();
Partitioner * p = new Partitioner(m);
p->SetMethod(Partitioner::Inner_RCM,Partitioner::Partition); // Specify the partitioner
p->Evaluate(); // Compute the partitioner and store new processor ID in the mesh
delete p;
BARRIER
if( m->GetProcessorRank() == 0 ) std::cout << "Evaluate: " << Timer()-ttt << std::endl;
ttt = Timer();
m->Redistribute(); // Redistribute the mesh data
m->ReorderEmpty(CELL|FACE|EDGE|NODE); // Clean the data after reordering
BARRIER
if( m->GetProcessorRank() == 0 ) std::cout << "Redistribute: " << Timer()-ttt << std::endl;
@@ -107,24 +109,24 @@ int main(int argc,char ** argv)
m->AssignGlobalID(CELL | EDGE | FACE | NODE);
BARRIER
if( m->GetProcessorRank() == 0 ) std::cout << "Assign id: " << Timer()-ttt << std::endl;
id = m->GlobalIDTag(); // Get the tag of the global ID
phi = m->CreateTag("Solution",DATA_REAL,CELL,NONE,1); // Create a new tag for the solution phi
tensor_K = m->CreateTag("K",DATA_REAL,CELL,NONE,1); // Create a new tag for K tensor
for( Mesh::iteratorCell cell = m->BeginCell(); cell != m->EndCell(); ++cell ) // Loop over mesh cells
if( cell->GetStatus() != Element::Ghost ) // If the cell is owned by this process
cell->Real(tensor_K) = 1.0; // Store the tensor K value into the tag
ttt = Timer();
m->ExchangeGhost(1,FACE); // Construct one layer of ghost cells connected via faces and exchange the data (phi and tensor_K) for them
BARRIER
if( m->GetProcessorRank() == 0 ) std::cout << "Exchange ghost: " << Timer()-ttt << std::endl;
ttt = Timer();
Solver S(Solver::INNER_ILU2); // Specify the linear solver: ASM + ILU2 + BiCGStab
Solver::Matrix A; // Declare the matrix of the linear system to be solved
Solver::Vector x,b; // Declare the solution and the right-hand side vectors
std::map<GeometricData,ElementType> table;
@@ -138,48 +140,49 @@ int main(int argc,char ** argv)
//~ if( m->GetProcessorRank() == 0 ) std::cout << "Prepare geometric data: " << Timer()-ttt << std::endl;
unsigned idmax = 0, idmin = UINT_MAX;
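// Compute the range [idmin,idmax) of global IDs over the cells owned by this process;
// it defines the local rows of the matrix and the local entries of the vectors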
for( Mesh::iteratorCell cell = m->BeginCell(); cell != m->EndCell(); ++cell )
if( cell->GetStatus() != Element::Ghost )
{
unsigned pid = cell->Integer(id);
unsigned pid2 = cell->GlobalID();
if( pid < idmin ) idmin = pid;
if( pid+1 > idmax ) idmax = pid+1;
}
// Set the index intervals for the matrix and vectors
A.SetInterval(idmin,idmax);
x.SetInterval(idmin,idmax);
b.SetInterval(idmin,idmax);
//~ std::cout << m->GetProcessorRank() << " A,x,b interval " << idmin << ":" << idmax << " size " << idmax-idmin << std::endl;
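// The assembly below uses a two-point flux approximation: for a face shared by two cells
// the flux is approximated through Coef*(phi_2 - phi_1), where Coef combines the harmonic
// mean of K, the face area and the vector between the cell barycenters; for a boundary
// face the cell barycenter is projected onto the face to build a similar coefficient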
// Solve \nabla \cdot \nabla phi = f equation
for( Mesh::iteratorFace face = m->BeginFace(); face != m->EndFace(); ++face )
{
//~ std::cout << face->LocalID() << " / " << m->NumberOfFaces() << std::endl;
Element::Status s1,s2;
Cell * r1 = face->BackCell();
Cell * r2 = face->FrontCell();
if( ((r1 == NULL || (s1 = r1->GetStatus()) == Element::Ghost)?0:1) +
((r2 == NULL || (s2 = r2->GetStatus()) == Element::Ghost)?0:1) == 0) continue;
Storage::real f_nrm[3], r1_cnt[3], r2_cnt[3], f_cnt[3], d1[3], Coef;
Storage::real f_area = face->Area(); // Get the face area
Storage::real vol1 = r1->Volume(), vol2; // Get the cell volume
Storage::integer id1 = r1->Integer(id), id2;
Storage::real K1 = r1->Real(tensor_K), K2, Kav;
face->Normal(f_nrm); // Get the face normal
f_nrm[0] /= f_area;
f_nrm[1] /= f_area;
f_nrm[2] /= f_area;
r1->Barycenter(r1_cnt); // Get the barycenter of the cell
face->Barycenter(f_cnt); // Get the barycenter of the face
if( r2 == NULL ) // boundary condition
{
Storage::real bnd_pnt[3], dist;
make_vec(f_cnt,r1_cnt,d1);
dist = dot_prod(f_nrm,d1) / dot_prod(f_nrm,f_nrm);
// bnd_pnt is a projection of the cell center to the face
bnd_pnt[0] = r1_cnt[0] + dist * f_nrm[0];
bnd_pnt[1] = r1_cnt[1] + dist * f_nrm[1];
bnd_pnt[2] = r1_cnt[2] + dist * f_nrm[2];
Coef = K1 * f_area / dist;
A[id1][id1] += -Coef;
@@ -195,7 +198,7 @@ int main(int argc,char ** argv)
Kav = 2.0 / ( 1.0 / K1 + 1.0 / K2 ); // Harmonic mean
make_vec(r2_cnt,r1_cnt,d1);
Coef = transmissibility(d1,Kav,f_nrm)/dot_prod(d1,d1) * f_area;
if( s1 != Element::Ghost )
{
A[id1][id1] += -Coef;
@@ -208,81 +211,85 @@ int main(int argc,char ** argv)
}
}
}
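// Assemble the right-hand side: the mean value of func_rhs over each owned cell times its volume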
for( Mesh::iteratorCell cell = m->BeginCell(); cell != m->EndCell(); ++cell )
if( cell->GetStatus() != Element::Ghost )
b[cell->Integer(id)] += cell->Mean(func_rhs, cell->Real(tensor_K)) * cell->Volume();
BARRIER
if( m->GetProcessorRank() == 0 ) std::cout << "Matrix assemble: " << Timer()-ttt << std::endl;
m->RemoveGeometricData(table); // Clean the computed geometric data
if( argc > 3 ) // Save the matrix and RHS if required
{
ttt = Timer();
A.Save(std::string(argv[2])); // "A.mtx"
b.Save(std::string(argv[3])); // "b.rhs"
BARRIER
if( m->GetProcessorRank() == 0 ) std::cout << "Save matrix \"" << argv[2] << "\" and RHS \"" << argv[3] << "\": " << Timer()-ttt << std::endl;
}
ttt = Timer();
S.SetMatrix(A); // Compute the preconditioner for the original matrix
S.Solve(b,x); // Solve the linear system with the previously computed preconditioner
BARRIER
if( m->GetProcessorRank() == 0 ) std::cout << "Solve system: " << Timer()-ttt << std::endl;
ttt = Timer();
Storage::real err_C = 0.0, err_L2 = 0.0;
for( Mesh::iteratorCell cell = m->BeginCell(); cell != m->EndCell(); ++cell )
if( cell->GetStatus() != Element::Ghost )
{
Storage::real exact = cell->Mean(func, 0); // Compute the mean value of the function over the cell
Storage::real err = fabs (x[cell->Integer(id)] - exact);
if (err > err_C)
err_C = err;
err_L2 += err * err * cell->Volume();
// x[cell->Integer(id)] = err;
}
err_C = m->AggregateMax(err_C); // Compute the maximal C norm for the error
err_L2 = sqrt(m->Integrate(err_L2)); // Compute the global L2 norm for the error
if( m->GetProcessorRank() == 0 ) std::cout << "err_C = " << err_C << std::endl;
if( m->GetProcessorRank() == 0 ) std::cout << "err_L2 = " << err_L2 << std::endl;
BARRIER
if( m->GetProcessorRank() == 0 ) std::cout << "Compute true residual: " << Timer()-ttt << std::endl;
ttt = Timer();
for( Mesh::iteratorCell cell = m->BeginCell(); cell != m->EndCell(); ++cell )
if( cell->GetStatus() != Element::Ghost )
cell->Real(phi) = x[cell->Integer(id)];
BARRIER
if( m->GetProcessorRank() == 0 ) std::cout << "Retrieve data: " << Timer()-ttt << std::endl;
ttt = Timer();
m->ExchangeData(phi,CELL); // Data exchange over processors
BARRIER
if( m->GetProcessorRank() == 0 ) std::cout << "Exchange phi: " << Timer()-ttt << std::endl;
std::string filename = "result";
if( m->GetProcessorsNumber() == 1 )
filename += ".vtk";
else
m->Save("grid.pvtk");
filename += ".pvtk";
ttt = Timer();
m->Save(filename);
BARRIER
if( m->GetProcessorRank() == 0 ) std::cout << "Save \"" << filename << "\": " << Timer()-ttt << std::endl;
delete m;
}
else
{
std::cout << argv[0] << " mesh_file [A.mtx b.rhs]" << std::endl;
}
#if defined(USE_PARTITIONER)
Partitioner::Finalize(); // Finalize the partitioner activity
#endif
Solver::Finalize(); // Finalize solver and close MPI activity
return 0;
}
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#include "../../inmost.h"
#include <string.h>
#include "../../inmost.h"
using namespace INMOST;
#define MESH_SIZE 64
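// V_ID maps vertex grid coordinates (x,y,z), shifted by the local start indices,
// to a linear index in the local vertex array newverts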
#define V_ID(x, y, z) ((x-localstart[0])*(localsize[1]+1)*(localsize[2]+1) + (y-localstart[1])*(localsize[2]+1) + (z-localstart[2]))
@@ -16,17 +15,18 @@ Mesh * ParallelCubeGenerator(INMOST_MPI_Comm comm, int nx, int ny, int nz)
int procs_per_axis[3] = {1,1,1};
int sizes[3] = {nx,ny,nz};
int rank,size;
Mesh * m = new Mesh(); // Create a mesh to be constructed
m->SetCommunicator(comm); // Set the MPI communicator, usually MPI_COMM_WORLD
#if defined(USE_MPI)
MPI::COMM_WORLD.Set_errhandler(MPI::ERRORS_THROW_EXCEPTIONS);
#endif
rank = m->GetProcessorRank(); // Get the rank of the current process
size = m->GetProcessorsNumber(); // Get the number of processors used in communicator comm
// Compute how many processes are used along each mesh axis (procs_per_axis)
{
int divsize = size;
std::vector<int> divs;
@@ -48,19 +48,19 @@ Mesh * ParallelCubeGenerator(INMOST_MPI_Comm comm, int nx, int ny, int nz)
(*max) /= *it;
}
}
//rank = proc_coords[2] * procs_per_axis[0] *procs_per_axis[1] + proc_coords[1] * procs_per_axis[0] + proc_coords[0];
int proc_coords[3] = {rank % procs_per_axis[0] , rank / procs_per_axis[0] % procs_per_axis[1], rank / (procs_per_axis[0] *procs_per_axis[1]) };
int localsize[3], localstart[3], localend[3];
int avgsize[3] =
{
(int)ceil(sizes[0]/procs_per_axis[0]),
(int)ceil(sizes[1]/procs_per_axis[1]),
(int)ceil(sizes[2]/procs_per_axis[2])
};
for(int j = 0; j < 3; j++)
{
localstart[j] = avgsize[j] * proc_coords[j];
@@ -69,10 +69,10 @@ Mesh * ParallelCubeGenerator(INMOST_MPI_Comm comm, int nx, int ny, int nz)
else localsize[j] = avgsize[j];
localend[j] = localstart[j] + localsize[j];
}
std::vector<Node *> newverts;
newverts.reserve(localsize[0]*localsize[1]*localsize[2]);
for(int i = localstart[0]; i <= localend[0]; i++)
for(int j = localstart[1]; j <= localend[1]; j++)
for(int k = localstart[2]; k <= localend[2]; k++)
@@ -81,22 +81,22 @@ Mesh * ParallelCubeGenerator(INMOST_MPI_Comm comm, int nx, int ny, int nz)
xyz[0] = i * 1.0 / (sizes[0]);
xyz[1] = j * 1.0 / (sizes[1]);
xyz[2] = k * 1.0 / (sizes[2]);
newverts.push_back(m->CreateNode(xyz)); // Create node in the mesh
if( ((int)newverts.size()-1) != V_ID(i,j,k))
printf("v_id = %ld, [%d,%d,%d] = %d\n", newverts.size()-1,i,j,k, V_ID(i, j, k));
}
std::vector< std::vector<Node *> > c_f_nodes(6);
for (int i = 0; i < 6; i++)
c_f_nodes[i].resize(4);
for(int i = localstart[0]+1; i <= localend[0]; i++)
for(int j = localstart[1]+1; j <= localend[1]; j++)
for(int k = localstart[2]+1; k <= localend[2]; k++)
{
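// Face-node connectivity of a hexahedral cell: nvf lists, face by face, the local indices
// of the nodes of its 6 quadrilateral faces, and numnodes stores the number of nodes per face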
const INMOST_DATA_ENUM_TYPE nvf[24] = {0,4,6,2,1,3,7,5,0,1,5,4,2,6,7,3,0,2,3,1,4,5,7,6};
const INMOST_DATA_ENUM_TYPE numnodes[6] = { 4, 4, 4, 4, 4, 4 };
Node * verts[8];
verts[0] = newverts[V_ID(i - 1,j - 1, k - 1)];
verts[1] = newverts[V_ID(i - 0,j - 1, k - 1)];
verts[2] = newverts[V_ID(i - 1,j - 0, k - 1)];
@@ -105,14 +105,13 @@ Mesh * ParallelCubeGenerator(INMOST_MPI_Comm comm, int nx, int ny, int nz)
verts[5] = newverts[V_ID(i - 0,j - 1, k - 0)];
verts[6] = newverts[V_ID(i - 1,j - 0, k - 0)];
verts[7] = newverts[V_ID(i - 0,j - 0, k - 0)];
m->CreateCell(verts,nvf,numnodes,6).first; // Create the cubic cell in the mesh
}
m->ResolveShared(); // Resolve duplicate nodes
m->ExchangeGhost(2,NODE); // Construct Ghost cells in 2 layers connected via nodes
return m;
}
@@ -121,17 +120,18 @@ Mesh * ParallelCubePrismGenerator(INMOST_MPI_Comm comm, int nx, int ny, int nz)
int procs_per_axis[3] = {1,1,1};
int sizes[3] = {nx,ny,nz};
int rank,size;
Mesh * m = new Mesh(); // Create a mesh to be constructed
m->SetCommunicator(comm); // Set the MPI communicator, usually MPI_COMM_WORLD
#if defined(USE_MPI)
MPI::COMM_WORLD.Set_errhandler(MPI::ERRORS_THROW_EXCEPTIONS);
#endif
rank = m->GetProcessorRank(); // Get the rank of the current process
size = m->GetProcessorsNumber(); // Get the number of processors used in communicator comm
// Compute how many processes are used along each mesh axis (procs_per_axis)
{
int divsize = size;
std::vector<int> divs;
@@ -153,19 +153,18 @@ Mesh * ParallelCubePrismGenerator(INMOST_MPI_Comm comm, int nx, int ny, int nz)
(*max) /= *it;
}
}
//rank = proc_coords[2] * procs_per_axis[0] *procs_per_axis[1] + proc_coords[1] * procs_per_axis[0] + proc_coords[0];
int proc_coords[3] = {rank % procs_per_axis[0] , rank / procs_per_axis[0] % procs_per_axis[1], rank / (procs_per_axis[0] *procs_per_axis[1]) };
int localsize[3], localstart[3], localend[3];
int avgsize[3] =
{
(int)ceil(sizes[0]/procs_per_axis[0]),
(int)ceil(sizes[1]/procs_per_axis[1]),
(int)ceil(sizes[2]/procs_per_axis[2])
};
for(int j = 0; j < 3; j++)
{
localstart[j] = avgsize[j] * proc_coords[j];
@@ -174,10 +173,10 @@ Mesh * ParallelCubePrismGenerator(INMOST_MPI_Comm comm, int nx, int ny, int nz)
else localsize[j] = avgsize[j];
localend[j] = localstart[j] + localsize[j];
}
std::vector<Node *> newverts;
newverts.reserve(localsize[0]*localsize[1]*localsize[2]);
for(int i = localstart[0]; i <= localend[0]; i++)
for(int j = localstart[1]; j <= localend[1]; j++)
for(int k = localstart[2]; k <= localend[2]; k++)
@@ -186,30 +185,30 @@ Mesh * ParallelCubePrismGenerator(INMOST_MPI_Comm comm, int nx, int ny, int nz)
xyz[0] = i * 1.0 / (sizes[0]);
xyz[1] = j * 1.0 / (sizes[1]);
xyz[2] = k * 1.0 / (sizes[2]);
newverts.push_back(m->CreateNode(xyz)); // Create node in the mesh
if( ((int)newverts.size()-1) != V_ID(i,j,k))
printf("v_id = %ld, [%d,%d,%d] = %d\n", newverts.size()-1,i,j,k, V_ID(i, j, k));
}
std::vector< std::vector<Node *> > c_f_nodes(5);
for (int i = 0; i < 3; i++)
c_f_nodes[i].resize(4);
for (int i = 3; i < 5; i++)
c_f_nodes[i].resize(3);
for(int i = localstart[0]+1; i <= localend[0]; i++)
for(int j = localstart[1]+1; j <= localend[1]; j++)
for(int k = localstart[2]+1; k <= localend[2]; k++)
{
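// Each cubic block is split into two prisms: the NE_nvf arrays store the face-node
// connectivity of the two prisms (3 quadrilateral and 2 triangular faces each),
// and the splitting diagonal is chosen below by the parity of (i+j)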
const INMOST_DATA_ENUM_TYPE NE_nvf1[18] = {0,4,6,2,0,3,7,4,2,6,7,3,0,2,3,4,7,6};
const INMOST_DATA_ENUM_TYPE NE_nvf2[18] = {0,4,7,3,1,3,7,5,0,1,5,4,0,3,1,4,5,7};
const INMOST_DATA_ENUM_TYPE NE_nvf3[18] = {0,4,6,2,2,6,5,1,1,5,4,0,0,2,1,4,5,6};
const INMOST_DATA_ENUM_TYPE NE_nvf4[18] = {1,5,6,2,1,3,7,5,7,3,2,6,1,2,3,6,5,7};
const INMOST_DATA_ENUM_TYPE numnodes[5] = {4,4,4,3,3};
Node * verts[8];
verts[0] = newverts[V_ID(i - 1,j - 1, k - 1)];
verts[1] = newverts[V_ID(i - 0,j - 1, k - 1)];
verts[2] = newverts[V_ID(i - 1,j - 0, k - 1)];
@@ -218,65 +217,74 @@ Mesh * ParallelCubePrismGenerator(INMOST_MPI_Comm comm, int nx, int ny, int nz)
verts[5] = newverts[V_ID(i - 0,j - 1, k - 0)];
verts[6] = newverts[V_ID(i - 1,j - 0, k - 0)];
verts[7] = newverts[V_ID(i - 0,j - 0, k - 0)];
// Create two prismatic cells in the mesh
if ((i + j) % 2 == 0)
{
m->CreateCell(verts,NE_nvf1,numnodes,5).first;
m->CreateCell(verts,NE_nvf2,numnodes,5).first;
}
else
{
m->CreateCell(verts,NE_nvf3,numnodes,5).first;
m->CreateCell(verts,NE_nvf4,numnodes,5).first;
}
}
m->ResolveShared(); // Resolve duplicate nodes
m->ExchangeGhost(2,NODE); // Construct Ghost cells in 2 layers connected via nodes
return m;
}
/****************************************************************************/
int main(int argc, char *argv[])
{
Mesh * mesh;
Mesh::Initialize(&argc,&argv);
double tt = Timer();
int nx = MESH_SIZE, ny = MESH_SIZE, nz = 1;