Commit cdb26b36 authored by Kirill Terekhov

overcome 2gb limit in .mtx writer

parent ed3c9b9d
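The root cause of the 2 GB limit is that MPI_File_write_at (like the other MPI I/O routines) takes an int element count, so a single call can move at most INT_MAX bytes. The hunks below therefore write each rank's buffer in a loop of chunks no larger than INT_MAX. A minimal sketch of that loop, assuming fh is an already opened MPI_File and offset is the rank's starting byte position in the file (the helper name and signature are illustrative, not INMOST's):

#include <mpi.h>
#include <algorithm>
#include <climits>

// Write 'len' bytes starting at 'offset', never passing more than INT_MAX
// bytes to a single MPI_File_write_at call, whose count parameter is an int.
void chunked_write(MPI_File fh, MPI_Offset offset, const char *data, unsigned long long len)
{
	unsigned long long shift = 0;
	while( shift != len )
	{
		unsigned long long chunk = std::min<unsigned long long>(INT_MAX, len - shift);
		MPI_File_write_at(fh, offset + shift, const_cast<char *>(data) + shift,
		                  static_cast<int>(chunk), MPI_CHAR, MPI_STATUS_IGNORE);
		shift += chunk;
	}
}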
@@ -482,7 +482,7 @@ namespace INMOST
 			while( shift != datasize )
 			{
 				chunk = std::min(static_cast<INMOST_DATA_BIG_ENUM_TYPE>(INT_MAX),datasize-shift);
-				REPORT_MPI(ierr = MPI_File_write_at(fh,offset+shift,&local_data[shift],static_cast<INMOST_MPI_SIZE>(chunk),MPI_CHAR,&stat));
+				REPORT_MPI(ierr = MPI_File_write_at(fh,offset+shift,local_data.c_str()+shift,static_cast<INMOST_MPI_SIZE>(chunk),MPI_CHAR,&stat));
 				if( ierr != MPI_SUCCESS ) REPORT_MPI(MPI_Abort(GetCommunicator(),__LINE__));
 				shift += chunk;
 			}
@@ -2081,17 +2081,13 @@ namespace INMOST
 		{
 			if( size )
 			{
-				bool flag = true;
 				const Storage::integer * recv = static_cast<const Storage::integer *>(static_cast<const void *>(data));
 				Storage::integer_array arr = element->IntegerArray(tag);
-				for(Storage::integer_array::iterator it = arr.begin(); it != arr.end(); it++)
-					if( *it == recv[0] )
-					{
-						flag = false;
-						break;
-					}
-				if( flag )
-					arr.push_back(recv[0]);
+				for(int k = 0; k < size; ++k)
+				{
+					if( std::find(arr.begin(),arr.end(),recv[k]) == arr.end() )
+						arr.push_back(recv[k]);
+				}
 			}
 		}
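The rewritten reduction above merges every received value into the element's array, skipping entries that are already present, instead of checking only recv[0]. A minimal sketch of that unique-append behaviour, using a plain std::vector to stand in for Storage::integer_array (an assumption made only for illustration):

#include <algorithm>
#include <vector>

// Append each received value unless the array already contains it,
// mirroring the std::find based merge in the hunk above.
void merge_unique(std::vector<int> &arr, const int *recv, int size)
{
	for(int k = 0; k < size; ++k)
		if( std::find(arr.begin(), arr.end(), recv[k]) == arr.end() )
			arr.push_back(recv[k]);
}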
@@ -2121,8 +2117,11 @@ namespace INMOST
 		for(integer it = 0; it < FaceLastLocalID(); ++it) if( isValidFace(it) )
 		{
 			Face face = FaceByLocalID(it);
+			assert(face->IntegerArray(tag_bnd).size() <= 2);
 			if( face->IntegerArray(tag_bnd).size() == 1 )
 				face->SetMarker(boundary_marker);
+			else
+				face->RemMarker(boundary_marker);
 		}
 		DeleteTag(tag_bnd);
 	}
@@ -2134,7 +2133,10 @@ namespace INMOST
 		for(integer it = 0; it < FaceLastLocalID(); ++it) if( isValidFace(it) )
 		{
 			Face face = FaceByLocalID(it);
-			if( face->Boundary() ) face->SetMarker(boundary_marker);
+			if( face->Boundary() )
+				face->SetMarker(boundary_marker);
+			else
+				face->RemMarker(boundary_marker);
 		}
 	}
@@ -123,8 +123,9 @@ namespace INMOST
 	{
 		if( size )
 		{
 			const Storage::integer * recv = static_cast<const Storage::integer *>(static_cast<const void *>(data));
 			Storage::integer_array arr = e->IntegerArray(tag);
+			//for(int k = 0; k < size; ++k)
 			arr.push_back(recv[0]);
 		}
 	}
@@ -1434,6 +1435,8 @@ namespace INMOST
 		int position = 0;
 		ENTER_BLOCK();
+		//TODO: overflow
 		MPI_Pack_size(static_cast<int>(pack_real.size()),INMOST_MPI_DATA_REAL_TYPE,comm,&sendsize);
 		exch_data.resize(sendsize);
 		if( sendsize > 0 ) MPI_Pack(&pack_real[0],static_cast<INMOST_MPI_SIZE>(pack_real.size()),INMOST_MPI_DATA_REAL_TYPE,&exch_data[0],static_cast<INMOST_MPI_SIZE>(exch_data.size()),&position,comm);
@@ -1449,14 +1452,7 @@ namespace INMOST
 		std::vector< MPI_Request > send_reqs, recv_reqs;
 		exch_buffer_type send_buffs(procs.size()), recv_buffs(procs.size());
 		std::vector<int> done;
-		//~ #if defined(USE_MPI_P2P)
-		//~ unsigned * sendsizeall = shared_space;
-		//~ unsigned usend[2] = {sendsize,pack_real.size()};
-		//~ REPORT_MPI(MPI_Win_fence(0,window)); //start exchange session
-		//~ for(unsigned k = 0; k < procs.size(); k++)
-		//~ 	REPORT_MPI(MPI_Put(usend,2,MPI_UNSIGNED,procs[k],mpirank*2,2,MPI_UNSIGNED,window));
-		//~ REPORT_MPI(MPI_Win_fence(MPI_MODE_NOSTORE | MPI_MODE_NOSUCCEED,window)); //end exchange session
-		//~ #else
 		std::vector<unsigned> sendsizeall(mpisize*2);
 		int pack_size2 = 0;
 		unsigned usend[2] = {static_cast<unsigned>(sendsize),static_cast<unsigned>(pack_real.size())};
@@ -1487,8 +1483,7 @@ namespace INMOST
 			REPORT_MPI(MPI_Waitall(static_cast<INMOST_MPI_SIZE>(send_reqs.size()),&send_reqs[0],MPI_STATUSES_IGNORE));
 		}
 		EXIT_BLOCK();
-		//~ REPORT_MPI(MPI_Allgather(usend,2,MPI_UNSIGNED,&sendsizeall[0],2,MPI_UNSIGNED,comm));
-		//~ #endif
 		ENTER_BLOCK();
 		{
@@ -1515,6 +1510,8 @@ namespace INMOST
 				int count = 0;
 				int position = 0;
 				unpack_real.resize(sendsizeall[recv_buffs[*qt].first*2+1]);
+				//TODO: overflow
 				MPI_Unpack(&recv_buffs[*qt].second[0],static_cast<INMOST_MPI_SIZE>(recv_buffs[*qt].second.size()),&position,&unpack_real[0],static_cast<INMOST_MPI_SIZE>(unpack_real.size()),INMOST_MPI_DATA_REAL_TYPE,comm);
 				std::vector<Storage::real>::iterator it1 = pack_real.begin() , it2 = unpack_real.begin();
 				while(it1 != pack_real.end() && it2 != unpack_real.end() )
@@ -1751,6 +1748,8 @@ namespace INMOST
 			REPORT_VAL("gathered elements",elements[m].size());
 			message_send[0] = static_cast<int>(message_send.size());
+			//TODO: overflow
 			MPI_Pack_size(static_cast<INMOST_MPI_SIZE>(message_send.size()),MPI_INT,comm,&sendsize);
 			send_buffs[m].first = *p;
 			send_buffs[m].second.resize(sendsize);
@@ -1785,6 +1784,7 @@ namespace INMOST
 					break;
 				}
 			if( pos == -1 ) throw Impossible;
+			//TODO: overflow
 			MPI_Unpack(&recv_buffs[*qt].second[0],static_cast<INMOST_MPI_SIZE>(recv_buffs[*qt].second.size()),&position,&size,1,MPI_INT,comm);
 			REPORT_VAL("unpacked message size",size-1);
 			message_recv[pos].resize(size-1);
@@ -2281,11 +2281,9 @@ namespace INMOST
 #if defined(USE_MPI)
 		if( m_state == Parallel )
 		{
-			INMOST_DATA_BIG_ENUM_TYPE number,shift[5], shift_recv[5], local_shift;
-			//int mpirank = GetProcessorRank(),mpisize = GetProcessorsNumber();
+			INMOST_DATA_ENUM_TYPE number,shift[5], shift_recv[5], local_shift;
 			int num;
-			//std::vector<INMOST_DATA_BIG_ENUM_TYPE> shifts(mpisize*4);
-			memset(shift,0,sizeof(INMOST_DATA_BIG_ENUM_TYPE));
+			memset(shift,0,sizeof(INMOST_DATA_ENUM_TYPE));
 			for(ElementType currenttype = NODE; currenttype <= ESET; currenttype = NextElementType(currenttype) )
 			{
 				if( mask & currenttype )
@@ -2302,7 +2300,7 @@ namespace INMOST
 			}
 			{
 				int ierr;
-				REPORT_MPI(ierr = MPI_Scan(shift,shift_recv,4,INMOST_MPI_DATA_BIG_ENUM_TYPE,MPI_SUM,GetCommunicator()));
+				REPORT_MPI(ierr = MPI_Scan(shift,shift_recv,5,INMOST_MPI_DATA_ENUM_TYPE,MPI_SUM,GetCommunicator()));
 				if( ierr != MPI_SUCCESS ) MPI_Abort(GetCommunicator(),__LINE__);
 			}
 			for(ElementType currenttype = NODE; currenttype <= ESET; currenttype = NextElementType(currenttype) )
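The MPI_Scan above produces an inclusive prefix sum of the per-rank counters (one slot per element type); the usual way to turn such a sum into a global numbering is to subtract the local count, which yields the first index owned by the current rank. A standalone sketch of that idea (the names and the MPI_UNSIGNED_LONG datatype are illustrative choices, not INMOST's):

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
	MPI_Init(&argc, &argv);
	int rank;
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	// Pretend each rank owns a different number of elements.
	unsigned long local_count = 100 + 10 * rank;
	unsigned long inclusive = 0;
	// Inclusive prefix sum across ranks: rank r receives the sum of counts on ranks 0..r.
	MPI_Scan(&local_count, &inclusive, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD);
	unsigned long first_global = inclusive - local_count;
	printf("rank %d numbers its elements from %lu to %lu\n", rank, first_global, inclusive - 1);
	MPI_Finalize();
	return 0;
}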
@@ -2331,7 +2329,7 @@ namespace INMOST
 		else
 		{
 #endif //USE_MPI
-			INMOST_DATA_BIG_ENUM_TYPE number;
+			INMOST_DATA_ENUM_TYPE number;
 			for(ElementType currenttype = NODE; currenttype <= ESET; currenttype = NextElementType(currenttype) )
 				if( mask & currenttype )
 				{
@@ -2485,12 +2483,13 @@ namespace INMOST
 				ElementArray<Element> adj = it->getAdjElements(CELL);
 				for(ElementArray<Element>::iterator jt = adj.begin(); jt != adj.end(); jt++)
 				{
+					assert(jt->IntegerDF(tag_owner) >= 0 && jt->IntegerDF(tag_owner) < GetProcessorsNumber());
 					arr.push_back(jt->GlobalID()); //identificator of the cell
 					arr.push_back(jt->IntegerDF(tag_owner)); //owner of the cell
 				}
 			}
 #if defined(USE_PARALLEL_WRITE_TIME)
 			++numfacesperproc[it->IntegerDF(tag_owner)];
 #endif
 		}
 		REPORT_STR("number of ghosted or shared faces");
@@ -2562,6 +2561,7 @@ namespace INMOST
 			for(int i = 0; i < 2; i++) //assert checks that there are two cells
 			{
 				Storage::integer owner = skin_data[i*2+1]; //cell owner
+				assert(owner >= 0 && owner < GetProcessorsNumber());
 				if( owner != mpirank )
 				{
 					skin_faces[owner].push_back(*it);
@@ -2581,7 +2581,7 @@ namespace INMOST
 			REPORT_VAL("skin faces",it->second);
 		}
 #endif
-		DeleteTag(tag_skin);
+		tag_skin = DeleteTag(tag_skin);
 #if defined(DEBUG_COMPUTE_SHARED_SKIN_SET)
 		{
 			Storage::integer keynum;
@@ -2630,7 +2630,7 @@ namespace INMOST
 	else
 	{
 		element_set all_visited;
-		Tag on_skin = CreateTag("TEMPORARY_ON_SKIN",DATA_INTEGER,bridge_type,bridge_type);
+		Tag on_skin = CreateTag("TEMPORARY_ON_SKIN_BRIDGE",DATA_INTEGER,bridge_type,bridge_type);
 		REPORT_STR("gathering specified elements on skin");
 		REPORT_VAL("type",ElementTypeName(bridge_type));
@@ -2668,6 +2668,7 @@ namespace INMOST
 			Storage::integer_array os = IntegerArray(*it,on_skin);
 			for(Storage::integer_array::iterator p = os.begin(); p != os.end(); p++)
 			{
+				assert(*p >= 0 && *p < GetProcessorsNumber());
 				if( *p != mpirank )
 				{
 					bridge[*p].push_back(*it);
@@ -2677,6 +2678,7 @@ namespace INMOST
 				}
 			}
 		}
+		on_skin = DeleteTag(on_skin);
 		REPORT_STR("number of shared elements")
#if defined(USE_PARALLEL_WRITE_TIME)
 		for(std::map<int,int>::iterator it = numelemsperproc.begin(); it != numelemsperproc.end(); ++it)
@@ -3556,13 +3558,13 @@ namespace INMOST
 				Storage::integer_array v = it->IntegerArrayDV(tag_processors);
 				for(Storage::integer_array::iterator vit = v.begin(); vit != v.end(); vit++)
 				{
-					if( !(*vit >= 0 && *vit < mpisize) )
-					{
-						// err++;
-						REPORT_STR(GetProcessorRank() << " " << __FILE__ << ":" << __LINE__ << " bad proc in list " << *vit << " " << ElementTypeName(it->GetElementType()) << ":" << it->LocalID() );
-						std::cout << GetProcessorRank() << " " << __FILE__ << ":" << __LINE__ << " bad proc in list " << *vit << " " << ElementTypeName(it->GetElementType()) << ":" << it->LocalID() << std::endl;
-					}
-					//assert(*vit >= 0 && *vit < mpisize);
+					//~ if( !(*vit >= 0 && *vit < mpisize) )
+					//~ {
+					//~ 	// err++;
+					//~ 	REPORT_STR(GetProcessorRank() << " " << __FILE__ << ":" << __LINE__ << " bad proc in list " << *vit << " " << ElementTypeName(it->GetElementType()) << ":" << it->LocalID() );
+					//~ 	std::cout << GetProcessorRank() << " " << __FILE__ << ":" << __LINE__ << " bad proc in list " << *vit << " " << ElementTypeName(it->GetElementType()) << ":" << it->LocalID() << std::endl;
+					//~ }
+					assert(*vit >= 0 && *vit < mpisize);
 					if( *vit != mpirank )
 						shared[*vit][ElementNum(it->GetElementType())].push_back(*it);
 				}
@@ -3570,13 +3572,13 @@ namespace INMOST
 			else if( estat == Element::Ghost )
 			{
 				int proc = it->IntegerDF(tag_owner);
-				if( !(proc >= 0 && proc < mpisize) )
-				{
-					// err++;
-					REPORT_STR(GetProcessorRank() << " " << __FILE__ << ":" << __LINE__ << " bad proc owner " << proc << " " << ElementTypeName(it->GetElementType()) << ":" << it->LocalID() );
-					std::cout << GetProcessorRank() << " " << __FILE__ << ":" << __LINE__ << " bad proc owner " << proc << " " << ElementTypeName(it->GetElementType()) << ":" << it->LocalID() << std::endl;
-				}
-				//assert(proc >= 0 && proc < mpisize);
+				//~ if( !(proc >= 0 && proc < mpisize) )
+				//~ {
+				//~ 	// err++;
+				//~ 	REPORT_STR(GetProcessorRank() << " " << __FILE__ << ":" << __LINE__ << " bad proc owner " << proc << " " << ElementTypeName(it->GetElementType()) << ":" << it->LocalID() );
+				//~ 	std::cout << GetProcessorRank() << " " << __FILE__ << ":" << __LINE__ << " bad proc owner " << proc << " " << ElementTypeName(it->GetElementType()) << ":" << it->LocalID() << std::endl;
+				//~ }
+				assert(proc >= 0 && proc < mpisize);
 				ghost[proc][ElementNum(it->GetElementType())].push_back(*it);
 			}
 		}
@@ -5737,7 +5739,11 @@ namespace INMOST
 			memset(shared_space,0,sizeof(unsigned)*mpisize); //zero bits where we receive data
 			REPORT_MPI(MPI_Win_fence(0,window)); //wait memset finish
 			for(i = 0; i < end; i++) shared_space[mpisize+i] = send_bufs[i].second.size()+1; //put data to special part of the memory
-			for(i = 0; i < end; i++) REPORT_MPI(MPI_Put(&shared_space[mpisize+i],1,MPI_UNSIGNED,send_bufs[i].first,mpirank,1,MPI_UNSIGNED,window)); //request rdma
+			for(i = 0; i < end; i++)
+			{
+				assert( send_bufs[i].first >= 0 && send_bufs[i].first < GetProcessorsNumber() );
+				REPORT_MPI(MPI_Put(&shared_space[mpisize+i],1,MPI_UNSIGNED,send_bufs[i].first,mpirank,1,MPI_UNSIGNED,window)); //request rdma
+			}
 			REPORT_MPI(MPI_Win_fence(MPI_MODE_NOSTORE | MPI_MODE_NOSUCCEED,window)); //end exchange session
 			if( parallel_strategy == 0 )
 			{
@@ -5834,6 +5840,9 @@ namespace INMOST
 				REPORT_VAL("put value", shared_space[mpisize+i]);
 				REPORT_VAL("destination", send_bufs[i].first);
 				REPORT_VAL("displacement", mpirank);
+				//~ if( !(send_bufs[i].first >= 0 && send_bufs[i].first < GetProcessorsNumber()) )
+				//~ 	std::cout << "requested put to " << send_bufs[i].first << "/" << GetProcessorsNumber() << std::endl;
+				assert( send_bufs[i].first >= 0 && send_bufs[i].first < GetProcessorsNumber() );
 				REPORT_MPI(MPI_Put(&shared_space[mpisize+i],1,MPI_UNSIGNED,send_bufs[i].first,mpirank,1,MPI_UNSIGNED,window)); //request rdma to target processors for each value
 			}
 			REPORT_MPI(MPI_Win_fence(MPI_MODE_NOSTORE | MPI_MODE_NOSUCCEED,window)); //end exchange session
@@ -5999,7 +6008,10 @@ namespace INMOST
 			{
 				Storage::integer_array mark = it->IntegerArray(tag_sendto);
 				for(Storage::integer_array::iterator kt = mark.begin(); kt != mark.end(); kt++)
+				{
+					assert( *kt >= 0 && *kt < GetProcessorsNumber() );
 					if( *kt != mpirank ) send_elements[*kt][GetHandleElementNum(*it)].push_back(*it);
+				}
 				it->DelData(tag_sendto);
 			}
 		}
@@ -6686,10 +6698,10 @@ namespace INMOST
 		bool delete_ghost = false;
 		//if( layers == Integer(tag_layers) && bridge == Integer(tag_bridge) ) return;
 		//cout << "Check " << layers << " " << Integer(GetHandle(),tag_layers) << endl;
-		if( layers < Integer(GetHandle(),tag_layers) ) delete_ghost = true;
-		else if( layers == Integer(GetHandle(),tag_layers) && bridge < Integer(GetHandle(),tag_bridge) ) delete_ghost = true;
+		//~ if( layers <= Integer(GetHandle(),tag_layers) ) delete_ghost = true;
+		//~ else if( layers == Integer(GetHandle(),tag_layers) && bridge < Integer(GetHandle(),tag_bridge) ) delete_ghost = true;
 		//if (marker != 0)
 		delete_ghost = true;
 		int test_bridge = 0;
 		if( (bridge & MESH) || (bridge & ESET) || (bridge & CELL) ) throw Impossible;
@@ -6728,7 +6740,10 @@ namespace INMOST
 				{
 					Storage::integer_array adj_procs = jt->IntegerArrayDV(tag_processors);
 					if( !std::binary_search(adj_procs.begin(),adj_procs.end(),p->first) )
+					{
+						assert(p->first >= 0 && p->first <= GetProcessorsNumber());
 						jt->IntegerArray(tag_sendto).push_back(p->first);
+					}
 					if( delete_ghost ) jt->IntegerArray(layers_marker).push_back(p->first);
 				}
 			}
@@ -6777,7 +6792,10 @@ namespace INMOST
 						ref_cur.push_back(*kt);
 						Storage::integer_array adj_procs = kt->IntegerArrayDV(tag_processors);
 						if( !std::binary_search(adj_procs.begin(),adj_procs.end(),p->first) )
+						{
+							assert(p->first >= 0 && p->first <= GetProcessorsNumber());
 							kt->IntegerArray(tag_sendto).push_back(p->first);
+						}
 						if( delete_ghost ) kt->IntegerArray(layers_marker).push_back(p->first);
 					}
 					kt->SetMarker(busy);
@@ -6837,7 +6855,7 @@ namespace INMOST
 			//Save("after_delete_ghost.pvtk");
 			//exit(-1);
 		}
-		DeleteTag(layers_marker);
+		layers_marker = DeleteTag(layers_marker);
 		//throw NotImplemented;
 #else
 		(void) layers;
@@ -788,18 +788,40 @@ namespace INMOST
 			if( ierr != MPI_SUCCESS ) MPI_Abort(GetCommunicator(),__LINE__);
 			ierr = MPI_File_open(GetCommunicator(),const_cast<char *>(file.c_str()),MPI_MODE_WRONLY | MPI_MODE_CREATE,MPI_INFO_NULL,&fh);
 			if( ierr != MPI_SUCCESS ) MPI_Abort(GetCommunicator(),__LINE__);
+			std::vector<INMOST_DATA_BIG_ENUM_TYPE> datasizes(rank ? 1 : size), offsets(rank ? 1 : size);
+			std::string buffer = rhs.str();
+			INMOST_DATA_BIG_ENUM_TYPE datasize = buffer.size(), offset;
+			ierr = MPI_Gather(&datasize,1,INMOST_MPI_DATA_BIG_ENUM_TYPE,&datasizes[0],1,INMOST_MPI_DATA_BIG_ENUM_TYPE,0,GetCommunicator());
+			if( ierr != MPI_SUCCESS ) MPI_Abort(GetCommunicator(),__LINE__);
 			if( rank == 0 )
 			{
+				MPI_Offset off;
 				std::stringstream header;
 				//header << "% vector " << name << std::endl;
 				//header << "% is written by INMOST" << std::endl;
 				//header << "% by MPI_File_* api" << std::endl;
 				header << vecsize << std::endl;
-				ierr = MPI_File_write_shared(fh,const_cast<char *>(header.str().c_str()),static_cast<int>(header.str().size()),MPI_CHAR,&stat);
+				ierr = MPI_File_write(fh,const_cast<char *>(header.str().c_str()),static_cast<int>(header.str().size()),MPI_CHAR,&stat);
 				if( ierr != MPI_SUCCESS ) MPI_Abort(GetCommunicator(),__LINE__);
+				ierr = MPI_File_get_position(fh,&off);
+				if( ierr != MPI_SUCCESS ) MPI_Abort(GetCommunicator(),__LINE__);
+				offsets[0] = off;
+				for(int k = 1; k < size; ++k)
+					offsets[k] = offsets[k-1] + datasizes[k-1];
 			}
-			ierr = MPI_File_write_ordered(fh,const_cast<char *>(rhs.str().c_str()),static_cast<int>(rhs.str().size()),MPI_CHAR,&stat);
+			ierr = MPI_Scatter(&offsets[0],1,INMOST_MPI_DATA_BIG_ENUM_TYPE,&offset,1,INMOST_MPI_DATA_BIG_ENUM_TYPE,0,GetCommunicator());
 			if( ierr != MPI_SUCCESS ) MPI_Abort(GetCommunicator(),__LINE__);
+			if( datasize )
+			{
+				INMOST_DATA_BIG_ENUM_TYPE shift = 0, chunk;
+				while( shift != datasize )
+				{
+					chunk = std::min(static_cast<INMOST_DATA_BIG_ENUM_TYPE>(INT_MAX),datasize-shift);
+					ierr = MPI_File_write_at(fh,offset+shift,buffer.c_str()+shift,static_cast<INMOST_MPI_SIZE>(chunk),MPI_CHAR,&stat);
+					if( ierr != MPI_SUCCESS ) MPI_Abort(GetCommunicator(),__LINE__);
+					shift += chunk;
+				}
+			}
 			ierr = MPI_File_close(&fh);
 			if( ierr != MPI_SUCCESS ) MPI_Abort(GetCommunicator(),__LINE__);
 		}
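Taken together, the new writer replaces the shared-file-pointer calls (MPI_File_write_shared / MPI_File_write_ordered) with explicit offsets: every rank reports its buffer size to rank 0 with MPI_Gather, rank 0 turns the sizes into byte offsets behind the header, the offsets come back with MPI_Scatter, and each rank then writes at its own offset in chunks of at most INT_MAX bytes. A self-contained sketch of the same pattern (the function name and the MPI_UNSIGNED_LONG_LONG datatype are illustrative choices, not INMOST's):

#include <mpi.h>
#include <algorithm>
#include <climits>
#include <string>
#include <vector>

// Each rank appends its 'buffer' to 'path': gather buffer sizes on rank 0,
// convert them to byte offsets (an exclusive prefix sum), scatter the offsets
// back, then write with MPI_File_write_at in chunks of at most INT_MAX bytes.
// A header written by rank 0 would simply shift offsets[0] by the header size.
void write_distributed(MPI_Comm comm, const char *path, const std::string &buffer)
{
	int rank, size;
	MPI_Comm_rank(comm, &rank);
	MPI_Comm_size(comm, &size);
	MPI_File fh;
	MPI_File_open(comm, const_cast<char *>(path), MPI_MODE_WRONLY | MPI_MODE_CREATE, MPI_INFO_NULL, &fh);
	unsigned long long datasize = buffer.size(), offset = 0;
	std::vector<unsigned long long> datasizes(rank ? 1 : size), offsets(rank ? 1 : size);
	MPI_Gather(&datasize, 1, MPI_UNSIGNED_LONG_LONG, &datasizes[0], 1, MPI_UNSIGNED_LONG_LONG, 0, comm);
	if( rank == 0 )
	{
		offsets[0] = 0;
		for(int k = 1; k < size; ++k)
			offsets[k] = offsets[k-1] + datasizes[k-1];
	}
	MPI_Scatter(&offsets[0], 1, MPI_UNSIGNED_LONG_LONG, &offset, 1, MPI_UNSIGNED_LONG_LONG, 0, comm);
	unsigned long long shift = 0;
	while( shift != datasize ) // every call stays below the INT_MAX count limit
	{
		unsigned long long chunk = std::min<unsigned long long>(INT_MAX, datasize - shift);
		MPI_File_write_at(fh, static_cast<MPI_Offset>(offset + shift), const_cast<char *>(buffer.c_str()) + shift,
		                  static_cast<int>(chunk), MPI_CHAR, MPI_STATUS_IGNORE);
		shift += chunk;
	}
	MPI_File_close(&fh);
}

Writing at precomputed per-rank offsets also makes the output layout deterministic and independent of the shared file pointer, which is what lets the total file size exceed 2 GB safely.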
@@ -1075,6 +1097,7 @@ namespace INMOST
 			int ierr, len;
 			MPI_File fh;
 			MPI_Status stat;
+			std::string buffer = mtx.str();
 			ierr = MPI_File_open(GetCommunicator(),const_cast<char *>(file.c_str()), MPI_MODE_CREATE | MPI_MODE_DELETE_ON_CLOSE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
 			if( ierr != MPI_SUCCESS ) MPI_Abort(GetCommunicator(),__LINE__);
 			ierr = MPI_File_close(&fh);
@@ -1088,8 +1111,13 @@ namespace INMOST
 				std::cout << estring << std::endl;
 				MPI_Abort(GetCommunicator(),__LINE__);
 			}
+			std::vector<INMOST_DATA_BIG_ENUM_TYPE> datasizes(rank ? 1 : size), offsets(rank ? 1 : size);
+			INMOST_DATA_BIG_ENUM_TYPE datasize = buffer.size(), offset;
+			ierr = MPI_Gather(&datasize,1,INMOST_MPI_DATA_BIG_ENUM_TYPE,&datasizes[0],1,INMOST_MPI_DATA_BIG_ENUM_TYPE,0,GetCommunicator());
+			if( ierr != MPI_SUCCESS ) MPI_Abort(GetCommunicator(),__LINE__);
 			if( rank == 0 )
 			{
+				MPI_Offset off;
 				std::stringstream header;
 				header << "%%MatrixMarket matrix coordinate real general" << std::endl;
 				header << "% matrix " << name << std::endl;
@@ -1097,11 +1125,27 @@ namespace INMOST
 				header << "% by MPI_File_* api" << std::endl;
 				header << matsize << " " << matsize << " " << nonzero << std::endl;
 				//std::string header_data(header.str());
-				ierr = MPI_File_write_shared(fh,const_cast<char *>(header.str().c_str()),static_cast<int>(header.str().size()),MPI_CHAR,&stat);
+				ierr = MPI_File_write(fh,const_cast<char *>(header.str().c_str()),static_cast<int>(header.str().size()),MPI_CHAR,&stat);
+				if( ierr != MPI_SUCCESS ) MPI_Abort(GetCommunicator(),__LINE__);
+				ierr = MPI_File_get_position(fh,&off);
 				if( ierr != MPI_SUCCESS ) MPI_Abort(GetCommunicator(),__LINE__);
+				offsets[0] = off;
+				for(int k = 1; k < size; ++k)
+					offsets[k] = offsets[k-1] + datasizes[k-1];
 			}
-			ierr = MPI_File_write_ordered(fh,const_cast<char *>(mtx.str().c_str()),static_cast<int>(mtx.str().size()),MPI_CHAR,&stat