Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
I
INMOST
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Analytics
Analytics
CI / CD
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
Kirill Terekhov
INMOST
Commits
3a09573c
Commit
3a09573c
authored
Jun 15, 2016
by
Kirill Terekhov
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
Some changes
parent
cf1303c0
Changes
4
Expand all
Hide whitespace changes
Inline
Side-by-side
Showing
4 changed files
with
953 additions
and
300 deletions
+953
-300
Examples/ADMFD/main_cells.cpp
Examples/ADMFD/main_cells.cpp
+448
-0
Source/Headers/inmost_mesh.h
Source/Headers/inmost_mesh.h
+260
-259
Source/Mesh/parallel.cpp
Source/Mesh/parallel.cpp
+109
-40
TODO.txt
TODO.txt
+136
-1
No files found.
Examples/ADMFD/main_
harm
.cpp
→
Examples/ADMFD/main_
cells
.cpp
View file @
3a09573c
This diff is collapsed.
Click to expand it.
Source/Headers/inmost_mesh.h
View file @
3a09573c
This diff is collapsed.
Click to expand it.
Source/Mesh/parallel.cpp
View file @
3a09573c
...
...
@@ -827,30 +827,59 @@ namespace INMOST
#endif //USE_PARALLEL_STORAGE
//determine which bboxes i intersect
dynarray
<
int
,
64
>
procs
;
Storage
::
real
bbox
[
6
];
dynarray
<
Storage
::
real
,
384
>
bboxs
(
mpisize
*
6
);
Storage
::
real
bbox
[
6
];
//local bounding box
std
::
vector
<
Storage
::
real
>
bboxs
(
mpisize
*
6
);
//Compute local bounding box containing nodes.
//Will be more convinient to compute (or store)
//and communicate local octree over all the nodes.
for
(
integer
k
=
0
;
k
<
dim
;
k
++
)
{
bbox
[
k
]
=
1e20
;
bbox
[
k
+
dim
]
=
-
1e20
;
}
for
(
iteratorNode
it
=
BeginNode
();
it
!=
EndNode
();
it
++
)
#if defined(USE_OMP)
#pragma omp parallel
#endif
{
Storage
::
real_array
arr
=
it
->
Coords
()
;
real
bbox0
[
6
]
;
for
(
integer
k
=
0
;
k
<
dim
;
k
++
)
{
if
(
arr
[
k
]
<
bbox
[
k
]
)
bbox
[
k
]
=
arr
[
k
];
if
(
arr
[
k
]
>
bbox
[
k
+
dim
]
)
bbox
[
k
+
dim
]
=
arr
[
k
];
bbox0
[
k
]
=
1e20
;
bbox0
[
k
+
dim
]
=
-
1e20
;
}
#if defined(USE_OMP)
#pragma omp for
#endif
for
(
integer
nit
=
0
;
nit
<
NodeLastLocalID
();
++
nit
)
if
(
isValidNode
(
nit
)
)
{
Node
it
=
NodeByLocalID
(
nit
);
Storage
::
real_array
arr
=
it
->
Coords
();
for
(
integer
k
=
0
;
k
<
dim
;
k
++
)
{
if
(
arr
[
k
]
<
bbox0
[
k
]
)
bbox0
[
k
]
=
arr
[
k
];
if
(
arr
[
k
]
>
bbox0
[
k
+
dim
]
)
bbox0
[
k
+
dim
]
=
arr
[
k
];
}
}
#if defined(USE_OMP)
#pragma omp critical
#endif
{
for
(
integer
k
=
0
;
k
<
dim
;
k
++
)
{
if
(
bbox0
[
k
]
<
bbox
[
k
]
)
bbox
[
k
]
=
bbox0
[
k
];
if
(
bbox0
[
k
]
>
bbox
[
k
+
dim
]
)
bbox
[
k
+
dim
]
=
bbox0
[
k
];
}
}
}
// write down bounding boxes
for
(
integer
k
=
0
;
k
<
dim
;
k
++
)
{
REPORT_VAL
(
"min"
,
bbox
[
k
]);
REPORT_VAL
(
"max"
,
bbox
[
dim
+
k
]);
}
// communicate bounding boxes
REPORT_MPI
(
MPI_Allgather
(
&
bbox
[
0
],
dim
*
2
,
INMOST_MPI_DATA_REAL_TYPE
,
&
bboxs
[
0
],
dim
*
2
,
INMOST_MPI_DATA_REAL_TYPE
,
comm
));
// find all processors that i communicate with
for
(
int
k
=
0
;
k
<
mpisize
;
k
++
)
if
(
k
!=
mpirank
)
{
...
...
@@ -888,14 +917,25 @@ namespace INMOST
if
(
same_boxes
)
{
REPORT_STR
(
"All bounding boxes are the same - assuming that mesh is replicated over all nodes"
);
for
(
Mesh
::
iteratorElement
it
=
BeginElement
(
CELL
|
EDGE
|
FACE
|
NODE
);
it
!=
EndElement
();
it
++
)
//for(Mesh::iteratorElement it = BeginElement(CELL | EDGE | FACE | NODE); it != EndElement(); it++)
for
(
ElementType
etype
=
NODE
;
etype
<=
CELL
;
etype
=
NextElementType
(
etype
)
)
{
Storage
::
integer_array
arr
=
it
->
IntegerArrayDV
(
tag_processors
);
arr
.
resize
(
mpisize
);
for
(
int
k
=
0
;
k
<
mpisize
;
k
++
)
arr
[
k
]
=
k
;
it
->
IntegerDF
(
tag_owner
)
=
0
;
if
(
mpirank
==
0
)
SetStatus
(
*
it
,
Element
::
Shared
);
else
SetStatus
(
*
it
,
Element
::
Ghost
);
#if defined(USE_OMP)
#pragma omp parallel for
#endif
for
(
integer
eit
=
0
;
eit
<
LastLocalID
(
etype
);
++
eit
)
if
(
isValidElement
(
etype
,
eit
)
)
{
Element
it
=
ElementByLocalID
(
etype
,
eit
);
integer_array
arr
=
it
->
IntegerArrayDV
(
tag_processors
);
arr
.
resize
(
mpisize
);
for
(
int
k
=
0
;
k
<
mpisize
;
k
++
)
arr
[
k
]
=
k
;
it
->
IntegerDF
(
tag_owner
)
=
0
;
if
(
mpirank
==
0
)
SetStatus
(
it
->
GetHandle
(),
Element
::
Shared
);
else
SetStatus
(
it
->
GetHandle
(),
Element
::
Ghost
);
}
}
ComputeSharedProcs
();
RecomputeParallelStorage
(
CELL
|
EDGE
|
FACE
|
NODE
);
...
...
@@ -908,7 +948,7 @@ namespace INMOST
std
::
sort
(
it
->
second
[
i
].
begin
(),
it
->
second
[
i
].
end
(),
GlobalIDComparator
(
this
));
//qsort(&it->second[i][0],it->second[i].size(),sizeof(Element *),CompareElementsCGID);
}
for
(
parallel_storage
::
iterator
it
=
ghost_elements
.
begin
();
it
!=
ghost_elements
.
end
();
it
++
)
for
(
parallel_storage
::
iterator
it
=
ghost_elements
.
begin
();
it
!=
ghost_elements
.
end
();
it
++
)
for
(
int
i
=
0
;
i
<
4
;
i
++
)
{
if
(
!
it
->
second
[
i
].
empty
()
)
...
...
@@ -934,9 +974,14 @@ namespace INMOST
REPORT_VAL
(
"time"
,
time
);
for
(
iteratorNode
it
=
BeginNode
();
it
!=
EndNode
();
it
++
)
//for(iteratorNode it = BeginNode(); it != EndNode(); it++)
#if defined(USE_OMP)
#pragma omp parallel for
#endif
for
(
integer
nit
=
0
;
nit
<
NodeLastLocalID
();
++
nit
)
if
(
isValidNode
(
nit
)
)
{
Node
it
=
NodeByLocalID
(
nit
);
Storage
::
integer_array
arr
=
it
->
IntegerArrayDV
(
tag_processors
);
arr
.
resize
(
1
);
arr
[
0
]
=
mpirank
;
...
...
@@ -1178,34 +1223,41 @@ namespace INMOST
REPORT_VAL
(
"type"
,
ElementTypeName
(
current_mask
));
//int owned_elems = 0;
//int shared_elems = 0;
//int owned_elems = 0;
//int shared_elems = 0;
int
owner
;
Element
::
Status
estat
;
time
=
Timer
();
//Determine what processors potentially share the element
for
(
Mesh
::
iteratorElement
it
=
BeginElement
(
current_mask
);
it
!=
EndElement
();
it
++
)
#if defined(USE_OMP)
#pragma omp parallel for
#endif
for
(
integer
eit
=
0
;
eit
<
LastLocalID
(
current_mask
);
++
eit
)
{
determine_my_procs_low
(
this
,
*
it
,
result
,
intersection
);
Storage
::
integer_array
p
=
it
->
IntegerArrayDV
(
tag_processors
);
if
(
result
.
empty
()
)
if
(
isValidElement
(
current_mask
,
eit
)
)
{
p
.
clear
();
p
.
push_back
(
mpirank
);
//++owned_elems;
Element
it
=
ElementByLocalID
(
current_mask
,
eit
);
determine_my_procs_low
(
this
,
it
->
GetHandle
(),
result
,
intersection
);
Storage
::
integer_array
p
=
it
->
IntegerArrayDV
(
tag_processors
);
if
(
result
.
empty
()
)
{
p
.
clear
();
p
.
push_back
(
mpirank
);
//++owned_elems;
}
else
{
p
.
replace
(
p
.
begin
(),
p
.
end
(),
result
.
begin
(),
result
.
end
());
//if( result.size() == 1 && result[0] == mpirank )
// ++owned_elems;
//else ++shared_elems;
}
}
else
{
p
.
replace
(
p
.
begin
(),
p
.
end
(),
result
.
begin
(),
result
.
end
());
//if( result.size() == 1 && result[0] == mpirank )
// ++owned_elems;
//else ++shared_elems;
}
}
time
=
Timer
()
-
time
;
//REPORT_VAL("predicted owned elements",owned_elems);
//REPORT_VAL("predicted shared elements",shared_elems);
//REPORT_VAL("predicted owned elements",owned_elems);
//REPORT_VAL("predicted shared elements",shared_elems);
REPORT_STR
(
"Predict processors for elements"
);
REPORT_VAL
(
"time"
,
time
);
...
...
@@ -1213,7 +1265,7 @@ namespace INMOST
time
=
Timer
();
//Initialize mapping that helps get local id by global id
std
::
vector
<
std
::
pair
<
int
,
int
>
>
mapping
;
REPORT_VAL
(
"mapping type"
,
ElementTypeName
(
current_mask
>>
1
));
REPORT_VAL
(
"mapping type"
,
ElementTypeName
(
current_mask
>>
1
));
for
(
Mesh
::
iteratorElement
it
=
BeginElement
(
current_mask
>>
1
);
it
!=
EndElement
();
it
++
)
{
mapping
.
push_back
(
std
::
make_pair
(
it
->
GlobalID
(),
it
->
LocalID
()));
...
...
@@ -1822,10 +1874,27 @@ namespace INMOST
#if defined(USE_MPI)
std
::
set
<
int
>
shared_procs
;
int
mpirank
=
GetProcessorRank
();
for
(
Mesh
::
iteratorNode
it
=
BeginNode
();
it
!=
EndNode
();
it
++
)
#if defined(USE_OMP)
#pragma omp parallel
#endif
{
Storage
::
integer_array
p
=
it
->
IntegerArrayDV
(
tag_processors
);
for
(
Storage
::
integer_array
::
iterator
kt
=
p
.
begin
();
kt
!=
p
.
end
();
kt
++
)
shared_procs
.
insert
(
*
kt
);
std
::
set
<
int
>
shared_procs_local
;
#if defined(USE_OMP)
#pragma omp for
#endif
for
(
integer
nit
=
0
;
nit
<
NodeLastLocalID
();
nit
++
)
if
(
isValidNode
(
nit
)
)
{
Node
it
=
NodeByLocalID
(
nit
);
integer_array
p
=
it
->
IntegerArrayDV
(
tag_processors
);
for
(
integer_array
::
iterator
kt
=
p
.
begin
();
kt
!=
p
.
end
();
kt
++
)
shared_procs_local
.
insert
(
*
kt
);
}
#if defined(USE_OMP)
#pragma omp critical
#endif
{
shared_procs
.
insert
(
shared_procs_local
.
begin
(),
shared_procs_local
.
end
());
}
}
std
::
set
<
int
>::
iterator
ir
=
shared_procs
.
find
(
mpirank
);
if
(
ir
!=
shared_procs
.
end
()
)
shared_procs
.
erase
(
ir
);
...
...
TODO.txt
View file @
3a09573c
...
...
@@ -56,7 +56,8 @@
46) ( ) . ,
=====================================================================================
0) ,
14) ReferenceArray?
...
...
@@ -104,6 +105,140 @@
58) vtk- "VTK_WRITE_SETS_TO_FILES" = "YES"
59) Openmp
60) ResolveShared MarkerType, ( ResolveModification)
61) ResolveModification .
62) DATA_UNKNOWN unknown
63) expr
64) Automatizator
65) Bulk MarkerType.
66)
65) (???)
66) Map RowMerger, ( OrderInfo )
67) Graph, Partitioner Solver
68) Redistribute tag
69) ghost- Partitioner::Evaluate, ReduceData.
====================================================================================
INMOST Mesh
0) TagDenseFixed, TagSparseFixed, TagDenseVariable, TagSparseVariable
dynamic_variable, static_variable may be templatized with respect to type of the Tag
1) rewrite parallel_storage
(?) RecomputeParallelStorage detect element types in ExchangeMarked
Either organise separation per element type in ElementSet, or store 4 element sets.
2) volume for nonconvex elements
implement in incedence_matrix in modify.cpp
3) (ok) New mesh format reader, XML
search tag positions in file
parallel reader
4) octree structure for intersection of remote nodes
in ResolveShared
in UnpackElementsData
in file reading procedures
5) algorithm for dynamic mesh adaptation in parallel
ResolveShared should depend on marker
Algorithm of shortest path to close the gap in shared skin
OR remap processors numbers from old cells onto new ones using bijection of cell centers
7) Introduce INMOST DataBase, that should be outside
(?) ElementSet should have data for its elements, Mesh should become ElementSet
Move MGetLink and all the mechanism to access data into TagManager
Move inline functions for Storage from inmost_mesh to inmost_data
9) (test) add DATA_REMOTE_REFERENCE, RemoteHandleType data type and structure that will allow to link another mesh
(ok) static Mesh::GetMesh(std::string name), structure to collect meshes by name.
Save multiple meshes to file
10) (test,ok) algorithm in PackTagData for DATA_VARIABLE
(ok) GetData,SetData,SetDataSize - add DATA_VARIABLE
(ok) GetDataSize - returns number of entries in array
(ok) GetDataCapacity - returns space capacity occupied by all the data (specific for DATA_VARIABLE)
(ok) GetData - puts all the data into allocated array of size GetDataSpace
(ok) SetData - given the number of entries is known, knows how to fill the internal data
(ok) MPI type for DATA_VARIABLE
Destroy type on Finalize
Test!
(ok) Should get capacity of unpacked data, provide function ComputeCapacity
See next
(ok) Organize GetDataCapacity with parameter that represents storage and size information
(ok) Provide tools to retrieve data from bytes arrays in Unpack functions
(ok) variable::RecordEntry - write into array of Row::entry
(ok) variable::RetriveEntry - get out of array of pairs
(ok) variable::RetriveSizeEntry - how many entries used to store
(ok) variable::RequiredSizeEntry - how many entries are needed to store
11) DATA_REFERENCE, DATA_REMOTE_REFERENCE can use PackElements in PackTagData
Have to make rounds of communication as in ExchangeMarked
Presence of a Tag of type DATA_REFERENCE or DATA_REMOTE_REFERENCE should internally mark the elements, invoke ExchangeMarked
12) Orientation of edges as directed, order faces with right hand side rule, each face's normal should follow right hand side rule
Insertion of faces into edge
Orientation of faces
Optimized algorithms:
all cells of the edge (traverse faces, get back cell)
all nodes of the face (traverse edges, always get first node)
all faces of the node (traverse only edges that are directed outside or inside, get their faces)
INMOST Solvers:
-1) (ok) Move annotation into matrix as outstanding structure that can be allocated on demand
0) (no) BasicRow class for autodiff or above
1) Rewrite BCGS as with Anderson Acceleration
3) Internal CSRMatrix format
3.1) Reorderings for CSRMatrix with PQ (RCM,ColAMD,MetisND)
3.2) Reorderings with execution tree (Mondriaan)
3.3) CSRMatrix functions - transpose, swap row/column, compute Schur complement with LU, multiply vector, additive Schwarz...
3.4) types for CSRMatrix
3.5) fast row/col exchange for MPTILUC
4) Fast laplacian matrix factorization graphs: Horst Simon, C:\Users\KIRILL\Documents\Read\solver\laplacian_graph
5) condition estimation for ilu2
6) Purely algebraic CPR (see galerkin principle)
7) PrepareMatrix should be able to merge rows in parallel, just sum the row on lowest-rank processor, have to return the solution on every processor.
INMOST Autodiff
0) Gateaux derivatives
1) Clean old stuff
2) Superproblem definition??
3) Read about Fenics
4) (ok) Automatizator::MakeCurrent, Automatizator::GetCurrent, Automatizator::HasCurrent
(ok) variable should detect presence of current automatizator and access its structures
5) Abstraction of KeyValueTable,
add cubic spline,
add bc treatment
6) condition_etype_variable, condition_marker_variable - needed in THREEPHASE in interpolation
7) support std::swap
8) expression foreach that will iterate over elements or elements handles
INMOST Nonlinear
1. Newton
2. Line search
3. Anderson
4. http://fenicsproject.org/documentation/tutorial/nonlinear.html
nonlinear solvers: Hans De Sterck
Nonlinear solvers for INMOST L-BFGS
1) Rewrite onephase into class with ComputeFunction, ComputeGradient, ComputeHessian(?)
2) Create abstract problem class for nonlinear solvers within INMOST
3) reimplement Newton/LS/AA into INMOST
4) implement L-BFGS https://github.com/PatWie/CppNumericalSolvers/blob/master/src/LbfgsSolver.cpp
5) hessian https://en.wikipedia.org/wiki/Hessian_automatic_differentiation
====================================================================================
7.MSPP: () : , ,
8.MSPP:
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment