Commit 39dd0a0e authored by Kirill Terekhov

A few updates for the Octree example

Consistent checks for the presence of global ids, and use of global ids in
communication between shared and ghost elements in parallel.

Simplified octree example without GUI.
parent 8fb51550
#include "agrid.h"
#include <math.h>
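//helper: returns the smaller of the two values, treating zero as "unset";
//presumably intended for merging refinement schedules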
Storage::integer min_nonzero(Storage::integer a, Storage::integer b)
{
Storage::integer get_max = std::max(a,b);
Storage::integer get_min = std::min(a,b);
if( get_min == 0 ) return get_max; else return get_min;
}
void AdaptiveMesh::Refine(const TagInteger & indicator)
{
int schedule_counter = 1;
bool scheduled = true;
//0. Extend indicator for edges and faces
CreateTag(indicator.GetTagName(),DATA_INTEGER,FACE|EDGE,NONE,1);
while(scheduled)
{
//1.Communicate indicator - it may be not synced
ExchangeTag(indicator,0,CELL);
//2.Propagate the indicator down to faces and edges,
// select latest schedule for them
//
//possible problem - a cell needs all its elements to be split in the same schedule,
//so the cell must mark all its elements without hanging nodes into the same schedule,
//unless there is an even earlier cell
//
//should we select earliest possible for elements without hanging nodes and
//latest possible for elements with hanging nodes?
//
//with a loop over cells we would mark the elements without hanging nodes of each cell
//in the current schedule
for(ElementType etype = FACE; etype >= EDGE; etype = PrevElementType(etype))
{
#if defined(USE_OMP)
#pragma omp parallel for
#endif
for(Storage::integer it = 0; it < LastLocalID(etype); ++it) if( isValidElement(etype,it) )
{
Element e = ElementByLocalID(etype,it);
ElementArray<Element> adj = e.getAdjElements(NextElementType(etype));
//here latest schedule is selected
if( e->nbAdjElements(NODE,hanging_node,0) > 0 ) //latest possible schedule
{
for(ElementArray<Element>::size_type kt = 0; kt < adj.size(); ++kt)
if( indicator[adj[kt]] )
indicator[e] = indicator[e] ? std::min(indicator[e],indicator[adj[kt]]) : indicator[adj[kt]];
}
else //earliest possible schedule
{
for(ElementArray<Element>::size_type kt = 0; kt < adj.size(); ++kt)
indicator[e] = std::max(indicator[e],indicator[adj[kt]]);
}
}
}
//3.Communicate indicator on faces and edges
ExchangeTag(indicator,0,FACE|EDGE);
//4.For each cell without an indicator, check whether any of its
//  hanging nodes has an adjacent edge scheduled for refinement;
//  if so, schedule the cell for refinement earlier.
scheduled = false;
#if defined(USE_OMP)
#pragma omp parallel for
#endif
for(Storage::integer it = 0; it < CellLastLocalID(); ++it) if( isValidCell(it) )
{
Cell c = CellByLocalID(it);
if( indicator[c] == 0 )
{
bool scheduled_c = false;
//retrieve only hanging nodes; this may be empty
ElementArray<Node> hanging = c->getNodes(hanging_node,0);
for(ElementArray<Node>::size_type kt = 0; kt < hanging.size() && !scheduled_c; ++kt)
{
//adjacent edges may be scheduled for refinement
ElementArray<Edge> adj = hanging[kt].getEdges();
for(ElementArray<Edge>::size_type lt = 0; lt < adj.size() && !scheduled_c; ++lt)
if( indicator[adj[lt]] != 0 )
{
indicator[c] = schedule_counter+1;
scheduled = scheduled_c = true;
}
}
}
}
//5.Go back to 1 until no new elements are scheduled
if( scheduled ) schedule_counter++;
}
//6.Refine scheduled elements
BeginModification();
while(schedule_counter)
{
//splitting of elements scheduled at the current level presumably goes here (not implemented in this commit)
schedule_counter--;
}
ResolveModification();
//Let the user update their data
ApplyModification();
EndModification();
}
void AdaptiveMesh::Coarse(const TagInteger & indicator)
{
}
#ifndef _AGRID_H
#define _AGRID_H
#include "inmost.h"
class AdaptiveMesh : public Mesh
{
MarkerType hanging_node;
void SplitFace();
public:
AdaptiveMesh() : Mesh() {}
~AdaptiveMesh() {}
/// Indicator must be 1 on cells to be refined
/// and 0 on all other cells
void Refine(const TagInteger & indicator);
void Coarse(const TagInteger & indicator);
};
#endif //_AGRID_H
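A minimal usage sketch for the class above; the tag name, refinement criterion, and mesh setup are illustrative assumptions, while the INMOST calls follow the patterns used elsewhere in this commit:
#include "agrid.h"
int main(int argc, char ** argv)
{
Mesh::Initialize(&argc,&argv);
{
AdaptiveMesh m;
//... load or generate the initial mesh into m here ...
//mark cells for refinement: 1 - refine, 0 - keep (hypothetical tag name and criterion)
TagInteger indicator = m.CreateTag("REFINE_INDICATOR",DATA_INTEGER,CELL,NONE,1);
for(Mesh::iteratorCell it = m.BeginCell(); it != m.EndCell(); ++it)
{
Storage::real cnt[3];
it->Centroid(cnt);
indicator[it->self()] = cnt[0] < 0.5 ? 1 : 0; //refine the left half of the domain
}
m.Refine(indicator);
}
Mesh::Finalize();
return 0;
}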
......@@ -6,17 +6,25 @@ set(SOURCE main.cpp
octgrid.h
my_glut.h)
set(SOURCE_LIGHT main_light.cpp
octgrid.cpp
octgrid.h)
add_executable(Octree ${SOURCE})
add_executable(Octree_light ${SOURCE_LIGHT})
set(TARGETS Octree Octree_light)
target_link_libraries(Octree inmost lapack blas)
foreach(vtarget ${TARGETS})
target_link_libraries(${vtarget} inmost)
#target_link_libraries(${vtarget} lapack blas)
if(USE_MPI)
message("linking with mpi")
target_link_libraries(Octree ${MPI_LIBRARIES})
message("linking ${vtarget} with mpi")
target_link_libraries(${vtarget} ${MPI_LIBRARIES})
if(MPI_LINK_FLAGS)
message("adding link flags")
set_target_properties(Octree PROPERTIES LINK_FLAGS "${MPI_LINK_FLAGS}")
message("adding link flags to ${vtarget}")
set_target_properties(${vtarget} PROPERTIES LINK_FLAGS "${MPI_LINK_FLAGS}")
endif()
endif(USE_MPI)
......@@ -28,20 +36,23 @@ if(OPENGL_FOUND)
include_directories(${OPENGL_INCLUDE_DIR})
include_directories(${GLUT_INCLUDE_DIR})
add_definitions(-D__GRAPHICS__)
target_link_libraries(Octree ${OPENGL_LIBRARIES} ${GLUT_LIBRARIES})
target_link_libraries(${vtarget} ${OPENGL_LIBRARIES} ${GLUT_LIBRARIES})
else(GLUT_FOUND)
message("GLUT not found")
message("GLUT not found for ${vtarget}")
endif(GLUT_FOUND)
else(OPENGL_FOUND)
message("OpenGL not found")
message("OpenGL not found for ${vtarget}")
endif(OPENGL_FOUND)
if(USE_PARTITIONER_ZOLTAN)
message("linking Octree with Zoltan")
target_link_libraries(Octree ${ZOLTAN_LIBRARIES})
message("linking ${vtarget} with Zoltan")
target_link_libraries(${vtarget} ${ZOLTAN_LIBRARIES})
endif()
if(USE_PARTITIONER_PARMETIS)
message("linking Octree with ParMETIS")
target_link_libraries(Octree ${PARMETIS_LIBRARIES})
message("linking ${vtarget} with ParMETIS")
target_link_libraries(${vtarget} ${PARMETIS_LIBRARIES})
endif()
endforeach(vtarget ${TARGETS})
#include "octgrid.h"
#include "my_glut.h"
#include "rotate.h"
#include <math.h>
#include "inmost.h"
#include <iomanip>
#include <iostream>
#define LOG(level,msg) { if (log_level >= level) cout << msg << endl; }
#define BARRIER MPI_Barrier(MPI_COMM_WORLD);
using namespace std;
struct grid thegrid;
// Variables for refine and coarse
int refine_depth = 1;
double mx = 0.75, my = 0.5;
double base_radius = 0.02;
int log_level = 3;
// Variables for MPI
int rank;
int size;
int counter = 0; // Count number of steps
/// Dump mesh to vtk file in folder "grids"
void dump_to_vtk()
{
//thegrid.mesh->ResolveShared(); // Resolve duplicate nodes
//thegrid.mesh->ExchangeGhost(2,NODE); // Construct Ghost cells in 2 layers connected via nodes
std::stringstream filename;
filename << "grids/grid_";
filename << size;
if( size == 1 )
filename << ".vtk";
else
filename << ".pvtk";
thegrid.mesh->Save(filename.str());
cout << "Process " << ::rank << ": dumped mesh to file" << endl;
}
/// Function provided to octgrid algorithm. Defines the transformation from the computational grid to the grid used for drawing.
void transformation(double xyz[3])
{
double tmp[3];
tmp[0] = xyz[0];
tmp[1] = xyz[1];
tmp[2] = xyz[2];
tmp[0] -= 0.5;
tmp[0] *= 100.0;
tmp[1] -= 0.5;
tmp[1] *= 100.0;
xyz[0] = tmp[0];
xyz[1] = tmp[1];
//xyz[2] = 4010.0 + 10.0 * (tmp[2]-0.5);
xyz[2] = 4010.0 + 10.0 * (tmp[2]-0.5);
}
/// Function provided to octgrid algorithm. Defines the inverse transformation, from the drawing grid back to the computational grid.
void rev_transformation(double xyz[3])
{
double tmp[3];
tmp[0] = xyz[0];
tmp[1] = xyz[1];
tmp[2] = xyz[2];
tmp[0] /= 100.0;
tmp[0] += 0.5;
tmp[1] /= 100.0;
tmp[1] += 0.5;
xyz[0] = tmp[0];
xyz[1] = tmp[1];
xyz[2] = 4010.0 + 10.0 * (tmp[2]-0.5);
xyz[2] = (tmp[2] - 4010.0) / 10.0 + 0.5;
}
/// Function provided to octgrid algorithm.
/// Determines whether a cell should be united (coarsened). Returns 1 to unite, 0 otherwise.
int cell_should_unite(struct grid * g, Cell cell)
{
const double r = base_radius;
int test = 1;
double x = cell.RealArrayDF(g->c_tags.center)[0];
double y = cell.RealArrayDF(g->c_tags.center)[1];
test &= (x-mx)*(x-mx)+(y-my)*(y-my) > r;
return test;
}
/// Function provided to octgrid algorithm.
/// Determines whether a cell should be split (refined). Returns 1 to split, 0 otherwise.
int cell_should_split(struct grid * g, Cell cell, int level)
{
double r = base_radius;
double x = cell.RealArrayDF(g->c_tags.center)[0];
double y = cell.RealArrayDF(g->c_tags.center)[1];
int c_level = cell.Integer(g->c_tags.level);
if ((x-mx)*(x-mx)+(y-my)*(y-my) < r)
{
if (c_level < refine_depth) return 1;
}
for (int level = 2; level <= refine_depth; level++)
{
if ((x-mx)*(x-mx)+(y-my)*(y-my) < r*5*(level-1))
if (c_level < refine_depth - level + 1) return 1;
}
return 0;
}
void prepare_to_correct_brothers()
{
correct_brothers(&thegrid,size,::rank, 0);
thegrid.mesh->RemoveGhost();
thegrid.mesh->Redistribute();
thegrid.mesh->ReorderEmpty(CELL|FACE|EDGE|NODE);
//thegrid.mesh->AssignGlobalID(CELL | EDGE | FACE | NODE);
}
/// Redistribute grid by partitioner
void redistribute(int type)
{
//std::stringstream str1;
//str1 << "startredist" << ::rank << ".xml";
//thegrid.mesh->Save(str1.str());
//thegrid.mesh->Save("startredist.pmf");
thegrid.mesh->ResolveShared();
//thegrid.mesh->Save("resolveshared.pmf");
//std::stringstream str2;
//str2 << "resolveshared" << ::rank << ".xml";
//thegrid.mesh->Save(str2.str());
LOG(2,"Process " << ::rank << ": redistribute. Cells: " << thegrid.mesh->NumberOfCells())
if( type == 3 )
{
TagInteger r = thegrid.mesh->RedistributeTag();
TagInteger o = thegrid.mesh->OwnerTag();
for(Mesh::iteratorCell it = thegrid.mesh->BeginCell(); it != thegrid.mesh->EndCell(); ++it)
r[it->self()] = (o[it->self()]+1)%size;
}
else
{
Partitioner * part = new Partitioner(thegrid.mesh);
// Specify the partitioner
if (type == 0) part->SetMethod(Partitioner::Parmetis, Partitioner::Partition);
if (type == 1) part->SetMethod(Partitioner::Parmetis, Partitioner::Repartition);
if (type == 2) part->SetMethod(Partitioner::Parmetis, Partitioner::Refine);
try
{
part->Evaluate();
}
catch (INMOST::ErrorType er)
{
cout << "Exception: " << er << endl;
}
delete part;
}
//thegrid.mesh->Save("parmetis.pmf");
correct_brothers(&thegrid,size,::rank, 2);
//thegrid.mesh->Save("brothers.pmf");
try
{
thegrid.mesh->Redistribute();
}
catch (INMOST::ErrorType er)
{
cout << "Exception: " << er << endl;
}
//thegrid.mesh->Save("redistribute.pmf");
thegrid.mesh->RemoveGhost();
//thegrid.mesh->Save("removeghost.pmf");
thegrid.mesh->ReorderEmpty(CELL|FACE|EDGE|NODE);
//thegrid.mesh->Save("reorderempty.pmf");
thegrid.mesh->AssignGlobalID(CELL | EDGE | FACE | NODE);
//thegrid.mesh->Save("assigngid.pmf");
LOG(2,"Process " << ::rank << ": redistribute completed")
}
int main(int argc, char ** argv)
{
int i;
int n[3] = {2,4,1};
thegrid.transformation = transformation;
thegrid.rev_transformation = rev_transformation;
thegrid.cell_should_split = cell_should_split;
thegrid.cell_should_unite = cell_should_unite;
Mesh::Initialize(&argc,&argv);
MPI_Comm_rank(MPI_COMM_WORLD, &::rank);
gridInit(&thegrid,n);
Partitioner::Initialize(&argc,&argv);
::size = thegrid.mesh->GetProcessorsNumber();
::rank = thegrid.mesh->GetProcessorRank();
std::cout << "rank " << ::rank << " size " << ::size << std::endl;
//dump_to_vtk();
std::cout << "gridAMR" << std::endl;
gridAMR(&thegrid, 0);
std::cout << "redistribute" << std::endl;
redistribute(3);
thegrid.mesh->Save("end.pvtk");
Partitioner::Finalize();
Mesh::Finalize();
return 0;
}
......@@ -1281,7 +1281,6 @@ namespace INMOST
integer hidden_count[6];
integer hidden_count_zero[6];
private:
ElementType have_global_id;
INMOST_DATA_BIG_ENUM_TYPE parallel_mesh_unique_id;
INMOST_MPI_Comm comm;
//INMOST_MPI_Group group;
......@@ -1311,9 +1310,14 @@ namespace INMOST
void AllocateSparseData (void * & q, const Tag & t);
void Init (std::string name);
public:
/// Test whether a global identifier was assigned to a certain type of elements.
/// This function does not validate the correctness of global identifiers.
/// @param types Single element type on which to test the presence of global identifiers.
/// @return Returns true if global identifiers are present on the provided element type.
bool HaveGlobalID (ElementType types) const;
/// Remove all data and all elements from the mesh
/// Reset geometry service and topology check flags
void Clear();
void Clear ();
/// For debug purposes
integer HandleDataPos (HandleType h) {return links[GetHandleElementNum(h)][GetHandleID(h)];}
/// For parmetis
......@@ -1323,7 +1327,7 @@ namespace INMOST
Mesh (std::string name);
Mesh (const Mesh & other);
Mesh & operator = (Mesh const & other);
~Mesh ();
virtual ~Mesh ();
/// Allocate a new marker.
/// Assert will fire in debug mode (NDEBUG not set) if you run out of space for markers, in this case you either
/// use too many markers, then you can just increase MarkerFields variable (increasing by 1 gives you 8 more markers)
......@@ -2255,6 +2259,7 @@ namespace INMOST
void ExchangeDataInnerEnd(const tag_set & tag, const parallel_storage & from, const parallel_storage & to, ElementType mask, MarkerType select, ReduceOperation op, exchange_data & storage);
void ExchangeBuffersInner(exch_buffer_type & send_bufs, exch_buffer_type & recv_bufs,std::vector<INMOST_MPI_Request> & send_reqs, std::vector<INMOST_MPI_Request> & recv_reqs);
std::vector<int> FinishRequests (std::vector<INMOST_MPI_Request> & recv_reqs);
void SortParallelStorage(parallel_storage & ghost, parallel_storage & shared,ElementType mask);
void GatherParallelStorage(parallel_storage & ghost, parallel_storage & shared, ElementType mask);
public:
#if defined(USE_PARALLEL_WRITE_TIME)
......@@ -2819,12 +2824,27 @@ namespace INMOST
/// Generally this is not needed if you use high-level algorithms for mesh modification
/// or mesh redistribution.
///
/// @param mask bitwise type mask
/// @param mask Bitwise mask of element types for which to recompute the parallel storage.
/// @see Mesh::BeginModification
/// @see Mesh::EndModification
/// @see Mesh::ExchangeMarked
/// @see Mesh::RemoveGhostElements
void RecomputeParallelStorage(ElementType mask);
/// Sort the parallel storage. The parallel storage is sorted according to
/// global identifiers, or according to element centroids if global identifiers
/// are not available. Invoke this function if you manually change global
/// identifiers, or if global identifiers are not available and the coordinates
/// of nodes change.
/// You can check the presence of global identifiers on a single
/// element type using Mesh::HaveGlobalID.
/// This function is called automatically inside Mesh::AssignGlobalID.
/// No action will be performed if USE_PARALLEL_STORAGE is not set in inmost_common.h.
/// @param mask Bitwise mask of element types for which to sort the parallel storage.
void SortParallelStorage(ElementType mask);
/// Outputs the parallel storage into XML log files.
/// USE_PARALLEL_STORAGE and USE_PARALLEL_WRITE_TIME should be activated in inmost_common.h.
/// @param mask Bitwise mask of element types for which to log the parallel storage.
void RecordParallelStorage(ElementType mask);
/// Synchronize bitwise mask of element types between processors.
///
/// Collective operation
......
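A short illustrative sketch of how the new calls fit together; the helper function is hypothetical, while the check-then-assign pattern mirrors the source changes below:
#include "inmost.h"
using namespace INMOST;
//Make sure global identifiers exist on the requested element types and that the
//parallel storage is sorted consistently (helper name and wrapper are illustrative).
void ensure_global_ids(Mesh & m, ElementType mask)
{
ElementType need = NONE;
for(ElementType etype = NODE; etype <= CELL; etype = NextElementType(etype))
if( (etype & mask) && !m.HaveGlobalID(etype) ) need |= etype;
if( need ) m.AssignGlobalID(need); //AssignGlobalID sorts the parallel storage itself
else m.SortParallelStorage(mask); //resort manually, e.g. after ids were changed by hand
}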
......@@ -1408,16 +1408,18 @@ namespace INMOST
RestoreGeometricTags();
if( HaveTag("GLOBAL_ID") )
{
tag_global_id = GetTag("GLOBAL_ID");
for(ElementType etype = NODE; etype <= CELL; etype = etype << 1)
if( tag_global_id.isDefined(etype) ) have_global_id |= etype;
}
if( HaveTag("TOPOLOGY_ERROR_TAG") )
tag_topologyerror = GetTag("TOPOLOGY_ERROR_TAG");
#if defined(USE_MPI)
if( m_state == Mesh::Parallel )
{
ElementType have_global_id = NONE;
if( GlobalIDTag().isValid() )
{
for(ElementType etype = NODE; etype <= MESH; etype = NextElementType(etype))
if( GlobalIDTag().isDefined(etype) ) have_global_id |= etype;
}
int hgi = have_global_id, recvtype = 0;
//~ int flag = 0, recvflag;
REPORT_MPI(MPI_Allreduce(&hgi,&recvtype,1,MPI_INT,MPI_BOR,comm));
......@@ -1429,7 +1431,6 @@ namespace INMOST
if( (etype & recvtype) && !(etype & have_global_id) )
{
tag_global_id = CreateTag("GLOBAL_ID",DATA_INTEGER, etype, NONE,1);
have_global_id |= etype;
//~ flag = 1;
REPORT_VAL("created new global id tag",ElementTypeName(etype));
}
......
......@@ -1715,7 +1715,7 @@ namespace INMOST
{
Tag tag_bnd = CreateTag("CALC_BOUNDARY",DATA_INTEGER,FACE,NONE);
//we need shared unique numbers on cells
if( !(have_global_id & CELL) ) AssignGlobalID(CELL);
if( !HaveGlobalID(CELL) ) AssignGlobalID(CELL);
#if defined(USE_OMP)
#pragma omp parallel for
#endif
......
......@@ -102,7 +102,7 @@ namespace INMOST
selfid = TieElement(5);
assert(selfid == 0);
dim = 3;
have_global_id = NONE;
//have_global_id = NONE;
checkset = DEFAULT_CHECK;
errorset = 0;
new_element = hide_element = 0;
......@@ -353,7 +353,7 @@ namespace INMOST
new_element = other.new_element;
hide_element = other.hide_element;
epsilon = other.epsilon;
have_global_id = other.have_global_id;
//have_global_id = other.have_global_id;
// copy communicator
if( m_state == Mesh::Parallel ) SetCommunicator(other.comm); else comm = INMOST_MPI_COMM_WORLD;
// reestablish geometric tags and table
......@@ -478,7 +478,7 @@ namespace INMOST
new_element = other.new_element;
hide_element = other.hide_element;
epsilon = other.epsilon;
have_global_id = other.have_global_id;
//have_global_id = other.have_global_id;
// copy communicator
if( m_state == Mesh::Parallel ) SetCommunicator(other.comm); else comm = INMOST_MPI_COMM_WORLD;
// reestablish geometric tags and table
......
......@@ -1414,6 +1414,13 @@ namespace INMOST
ReleaseMarker(new_element);
hide_element = 0;
new_element = 0;
//This should be done in ResolveModification
ElementType have_global_id = NONE;
if( GlobalIDTag().isValid() )
{
for(ElementType etype = NODE; etype <= MESH; etype = NextElementType(etype))
if( GlobalIDTag().isDefined(etype) ) have_global_id |= etype;
}
if( have_global_id ) AssignGlobalID(have_global_id);
}
......
......@@ -119,6 +119,11 @@ int main(int argc,char ** argv)
case 2: action = Partitioner::Refine; break;
}
m->ResolveShared();
//std::stringstream str;
//str << "dump" << m->GetProcessorRank() << ".xml";
//m->Save(str.str());
if( itype == 8 ) //pass the whole local mesh on the current processor to the next one
{
if( m->OwnerTag().isValid() )
......