From 2500c870cb9ade56ba0fd5c363d2fb4c1277ee7e Mon Sep 17 00:00:00 2001
From: Brody Richard Bassett
Date: Mon, 12 Feb 2024 19:16:22 -0800
Subject: [PATCH 1/7] Added optional node pair exclusion

---
 src/Neighbor/ConnectivityMap.cc       |  6 ++++--
 src/Neighbor/ConnectivityMap.hh       |  6 ++++++
 src/Neighbor/ConnectivityMapInline.hh | 11 +++++++++++
 3 files changed, 21 insertions(+), 2 deletions(-)

diff --git a/src/Neighbor/ConnectivityMap.cc b/src/Neighbor/ConnectivityMap.cc
index bebf3b41f..1c68e45ad 100644
--- a/src/Neighbor/ConnectivityMap.cc
+++ b/src/Neighbor/ConnectivityMap.cc
@@ -134,7 +134,8 @@ ConnectivityMap():
   mNodeTraversalIndices(),
   mKeys(FieldStorageType::CopyFields),
   mCouplingPtr(std::make_shared<NodeCoupling>()),
-  mIntersectionConnectivity() {
+  mIntersectionConnectivity(),
+  mExcludePairs([](int, int, int, int) { return false; }) {
 }
 
 //------------------------------------------------------------------------------
@@ -269,7 +270,8 @@ patchConnectivity(const FieldList<Dimension, int>& flags,
         const auto jNodeList = mNodePairList[k].j_list;
         const auto i = mNodePairList[k].i_node;
         const auto j = mNodePairList[k].j_node;
-        if (flags(iNodeList, i) != 0 and flags(jNodeList, j) != 0) {
+        if ((flags(iNodeList, i) != 0 and flags(jNodeList, j) != 0) and
+            !mExcludePairs(iNodeList, i, jNodeList, j)) {
           culledPairs_thread.push_back(NodePairIdxType(old2new(iNodeList, i), iNodeList,
                                                        old2new(jNodeList, j), jNodeList));
         }

diff --git a/src/Neighbor/ConnectivityMap.hh b/src/Neighbor/ConnectivityMap.hh
index 4275d9e20..e0bae613b 100644
--- a/src/Neighbor/ConnectivityMap.hh
+++ b/src/Neighbor/ConnectivityMap.hh
@@ -171,6 +171,9 @@ public:
   // Return which NodeList index in order the given one would be in our connectivity.
   unsigned nodeListIndex(const NodeList<Dimension>* nodeListPtr) const;
 
+  // Exclude (nodeListi, nodei, nodeListj, nodej) pairs if this returns true
+  void setNodePairExclusion(std::function<bool(int, int, int, int)> excludePairs);
+
   // Check that the internal data structure is valid.
   bool valid() const;
 
@@ -208,6 +211,9 @@ private:
   using IntersectionConnectivityContainer = std::unordered_map<NodePairIdxType, std::vector<std::vector<int>>>;
   IntersectionConnectivityContainer mIntersectionConnectivity;
 
+  // Exclude pairs matching this function
+  std::function<bool(int, int, int, int)> mExcludePairs;
+
   // Internal method to fill in the connectivity, once the set of NodeLists
   // is determined.
   void computeConnectivity();

diff --git a/src/Neighbor/ConnectivityMapInline.hh b/src/Neighbor/ConnectivityMapInline.hh
index 47f3836ab..41efab444 100644
--- a/src/Neighbor/ConnectivityMapInline.hh
+++ b/src/Neighbor/ConnectivityMapInline.hh
@@ -390,4 +390,15 @@ intersectionConnectivity(const NodePairIdxType& pair) const {
   return itr->second;
 }
 
+//------------------------------------------------------------------------------
+// Set the function for excluding node pairs
+//------------------------------------------------------------------------------
+template<typename Dimension>
+void
+ConnectivityMap<Dimension>::
+setNodePairExclusion(std::function<bool(int, int, int, int)> excludePairs)
+{
+  mExcludePairs = excludePairs;
+}
+
 }

From 375600d1a3f35c03b0b81348cbb785e4fee82f4f Mon Sep 17 00:00:00 2001
From: Brody Richard Bassett
Date: Wed, 21 Feb 2024 10:50:27 -0800
Subject: [PATCH 2/7] Test functions for ensuring that node pairs are unique.
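
Note: makeUnique() canonicalizes each pair in place and then relies on std::sort/std::unique,
which requires NodePairIdxType to supply operator< and operator==. A minimal standalone sketch
of the same canonicalize-then-deduplicate idiom (the PairIdx struct below is illustrative only,
not Spheral's NodePairIdxType):

// Illustrative sketch only (not Spheral code): canonicalize each pair so the
// "smaller" (list, node) index comes first, then sort and strip duplicates.
#include <algorithm>
#include <tuple>
#include <utility>
#include <vector>

struct PairIdx {
  int i_list, i_node, j_list, j_node;
};

inline bool operator<(const PairIdx& a, const PairIdx& b) {
  return std::tie(a.i_list, a.i_node, a.j_list, a.j_node) <
         std::tie(b.i_list, b.i_node, b.j_list, b.j_node);
}

inline bool operator==(const PairIdx& a, const PairIdx& b) {
  return std::tie(a.i_list, a.i_node, a.j_list, a.j_node) ==
         std::tie(b.i_list, b.i_node, b.j_list, b.j_node);
}

void makeUnique(std::vector<PairIdx>& pairs) {
  for (auto& p : pairs) {
    // Put the (list, node) indices in canonical order so (a, b) and (b, a)
    // compare equal after sorting.
    if (p.i_list > p.j_list or (p.i_list == p.j_list and p.i_node > p.j_node)) {
      std::swap(p.i_list, p.j_list);
      std::swap(p.i_node, p.j_node);
    }
  }
  std::sort(pairs.begin(), pairs.end());
  pairs.erase(std::unique(pairs.begin(), pairs.end()), pairs.end());
}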
---
 src/Neighbor/ConnectivityMap.cc |  2 ++
 src/Neighbor/ConnectivityMap.hh |  2 +-
 src/Neighbor/NodePairList.cc    | 29 +++++++++++++++++++++++++++++
 src/Neighbor/NodePairList.hh    |  1 +
 4 files changed, 33 insertions(+), 1 deletion(-)

diff --git a/src/Neighbor/ConnectivityMap.cc b/src/Neighbor/ConnectivityMap.cc
index 1c68e45ad..f052f09ba 100644
--- a/src/Neighbor/ConnectivityMap.cc
+++ b/src/Neighbor/ConnectivityMap.cc
@@ -1128,6 +1128,8 @@ computeConnectivity() {
 //       }
 //     }
 
+  mNodePairList.makeUnique();
+
   // Post conditions.
   BEGIN_CONTRACT_SCOPE
   // Make sure that the correct number of nodes have been completed.

diff --git a/src/Neighbor/ConnectivityMap.hh b/src/Neighbor/ConnectivityMap.hh
index e0bae613b..38c9a121d 100644
--- a/src/Neighbor/ConnectivityMap.hh
+++ b/src/Neighbor/ConnectivityMap.hh
@@ -173,7 +173,7 @@ public:
   // Exclude (nodeListi, nodei, nodeListj, nodej) pairs if this returns true
   void setNodePairExclusion(std::function<bool(int, int, int, int)> excludePairs);
-  
+
   // Check that the internal data structure is valid.
   bool valid() const;

diff --git a/src/Neighbor/NodePairList.cc b/src/Neighbor/NodePairList.cc
index 4e76a3bcd..32d21e934 100644
--- a/src/Neighbor/NodePairList.cc
+++ b/src/Neighbor/NodePairList.cc
@@ -1,5 +1,7 @@
 #include "NodePairList.hh"
 
+#include <algorithm>
+
 namespace Spheral {
 
   NodePairIdxType::NodePairIdxType(int i_n, int i_l, int j_n, int j_l, double f) :
@@ -15,4 +17,31 @@ namespace Spheral {
     mNodePairList.clear();
   }
 
+  void NodePairList::makeUnique() {
+    // Make sure the node pairs are ordered correctly
+    for (auto kk = 0u; kk < mNodePairList.size(); ++kk)
+    {
+      auto& pair = mNodePairList[kk];
+      if (pair.i_list > pair.j_list)
+      {
+        const auto temp_list = pair.j_list;
+        const auto temp_node = pair.j_node;
+        pair.j_list = pair.i_list;
+        pair.j_node = pair.i_node;
+        pair.i_list = temp_list;
+        pair.i_node = temp_node;
+      }
+      if (pair.i_list == pair.j_list && pair.i_node > pair.j_node)
+      {
+        const auto temp = pair.j_node;
+        pair.j_node = pair.i_node;
+        pair.i_node = temp;
+      }
+    }
+
+    // Remove duplicates
+    std::sort(mNodePairList.begin(), mNodePairList.end());
+    mNodePairList.erase(std::unique(mNodePairList.begin(), mNodePairList.end()), mNodePairList.end());
+  }
+
 }

diff --git a/src/Neighbor/NodePairList.hh b/src/Neighbor/NodePairList.hh
index 51c4077a1..06299e468 100644
--- a/src/Neighbor/NodePairList.hh
+++ b/src/Neighbor/NodePairList.hh
@@ -60,6 +60,7 @@ public:
   void push_back(NodePairIdxType nodePair);
   void clear();
   size_t size() const { return mNodePairList.size(); }
+  void makeUnique();
 
   // Iterators
   iterator begin() { return mNodePairList.begin(); }

From 4220f38a3d28606d85ed6700f0f5f0dd26643783 Mon Sep 17 00:00:00 2001
From: Brody Richard Bassett
Date: Wed, 27 Mar 2024 14:21:18 -0700
Subject: [PATCH 3/7] Switched location of culling.
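
Note: with the exclusion test moved out of patchConnectivity and into computeConnectivity,
excluded pairs never enter the neighbor lists at all, so the predicate must be registered
before the connectivity map is (re)built. A rough usage sketch (the wrapper function and the
hard-coded NodeList indices are illustrative only, not part of this change):

// Illustrative sketch only: drop every interaction between NodeLists 0 and 1.
// The predicate receives (nodeListi, i, nodeListj, j); returning true excludes the pair.
#include "Neighbor/ConnectivityMap.hh"

template<typename Dimension>
void excludeCrossTalk(Spheral::ConnectivityMap<Dimension>& cm) {
  cm.setNodePairExclusion([](int nodeListi, int /*i*/, int nodeListj, int /*j*/) {
    return (nodeListi == 0 and nodeListj == 1) or
           (nodeListi == 1 and nodeListj == 0);
  });
}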
---
 src/Neighbor/ConnectivityMap.cc | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/src/Neighbor/ConnectivityMap.cc b/src/Neighbor/ConnectivityMap.cc
index f052f09ba..3489dfe95 100644
--- a/src/Neighbor/ConnectivityMap.cc
+++ b/src/Neighbor/ConnectivityMap.cc
@@ -270,11 +270,9 @@ patchConnectivity(const FieldList<Dimension, int>& flags,
         const auto jNodeList = mNodePairList[k].j_list;
         const auto i = mNodePairList[k].i_node;
         const auto j = mNodePairList[k].j_node;
-        if ((flags(iNodeList, i) != 0 and flags(jNodeList, j) != 0) and
-            !mExcludePairs(iNodeList, i, jNodeList, j)) {
+        if (flags(iNodeList, i) != 0 and flags(jNodeList, j) != 0)
           culledPairs_thread.push_back(NodePairIdxType(old2new(iNodeList, i), iNodeList,
                                                        old2new(jNodeList, j), jNodeList));
-        }
       }
 #pragma omp critical
       {
@@ -921,9 +919,14 @@ computeConnectivity() {
 
           // We don't include self-interactions.
           if ((iNodeList != jNodeList) or (i != j)) {
-            neighbors[jNodeList].push_back(j);
-            if (calculatePairInteraction(iNodeList, i, jNodeList, j, firstGhostNodej)) nodePairs_private.push_back(NodePairIdxType(i, iNodeList, j, jNodeList));
-            if (domainDecompIndependent) keys[jNodeList].push_back(pair(j, mKeys(jNodeList, j)));
+
+            // Exclude specified pairs.
+            if (!mExcludePairs(iNodeList, i, jNodeList, j))
+            {
+              neighbors[jNodeList].push_back(j);
+              if (calculatePairInteraction(iNodeList, i, jNodeList, j, firstGhostNodej)) nodePairs_private.push_back(NodePairIdxType(i, iNodeList, j, jNodeList));
+              if (domainDecompIndependent) keys[jNodeList].push_back(pair(j, mKeys(jNodeList, j)));
+            }
           }
         }
       }

From b990b4dc7438f038cce32282771aba360506c504 Mon Sep 17 00:00:00 2001
From: Brody Richard Bassett
Date: Mon, 28 Oct 2024 16:46:14 -0700
Subject: [PATCH 4/7] Added function to integrator for culling

---
 src/Integrator/Integrator.cc | 167 +++++++++++++++++++----------------
 src/Integrator/Integrator.hh |   1 +
 2 files changed, 93 insertions(+), 75 deletions(-)

diff --git a/src/Integrator/Integrator.cc b/src/Integrator/Integrator.cc
index f0d914c45..bc5099e24 100644
--- a/src/Integrator/Integrator.cc
+++ b/src/Integrator/Integrator.cc
@@ -537,95 +537,112 @@ Integrator<Dimension>::setGhostNodes() {
   db.updateConnectivityMap(mRequireGhostConnectivity, mRequireOverlapConnectivity, mRequireIntersectionConnectivity);
 
   // If we're culling ghost nodes, do it now.
-  if (mCullGhostNodes and
-      (not this->domainDecompositionIndependent()) and
-      (not mRequireGhostConnectivity) and
-      (not mRequireOverlapConnectivity)) {
-    const auto numNodeLists = db.numNodeLists();
-    const auto& cm = db.connectivityMap();
-
-    // First build the set of flags indicating which nodes are used.
-    FieldList<Dimension, int> flags = db.newGlobalFieldList(0, "active nodes");
-    for (auto [nodeListi, nodeListPtr]: enumerate(db.nodeListBegin(), db.nodeListEnd())) {
-      const auto& nodeList = *nodeListPtr;
-      for (auto i = 0u; i < nodeList.numInternalNodes(); ++i) {
-        flags(nodeListi, i) = 1;
-        const vector<vector<int> >& fullConnectivity = cm.connectivityForNode(&nodeList, i);
-        for (auto nodeListj = 0u; nodeListj < fullConnectivity.size(); ++nodeListj) {
-          const vector<int>& connectivity = fullConnectivity[nodeListj];
-          for (auto j: connectivity) flags(nodeListj, j) = 1;
-        }
-      }
+  cullGhostNodesFunc();
 
-      // Ghost nodes that are control nodes for other ghost nodes we're keeping must
-      // be kept as well.
- const auto firstGhostNode = nodeList.firstGhostNode(); - for (auto* boundaryPtr: range(boundaries.begin(), boundaries.end())) { - const auto& boundary = *boundaryPtr; - const auto& controlNodes = boundary.controlNodes(nodeList); - const auto& ghostNodes = boundary.ghostNodes(nodeList); - // CHECK(controlNodes.size() == ghostNodes.size()); // Not true if this is a DistributedBoundary! - for (auto i: controlNodes) { - if (i >= (int)firstGhostNode) flags(nodeListi, i) = 1; - } - - // Boundary conditions are allowed to opt out of culling entirely. - if (not boundary.allowGhostCulling()) { - for (auto i: ghostNodes) flags(nodeListi, i) = 1; - } - } - } + // } else { - // Create the index mapping from old to new node orderings. - FieldList old2newIndexMap = db.newGlobalFieldList(int(0), "index map"); - for (auto [nodeListi, nodeListPtr]: enumerate(db.nodeListBegin(), db.nodeListEnd())) { - const auto numNodes = nodeListPtr->numNodes(); - for (auto i = 0u; i < numNodes; ++i) old2newIndexMap(nodeListi, i) = i; - } + // // We're not connectivity and don't need ghost nodes, so make sure all + // // boundaries are empty. + // const vector*> boundaries = uniqueBoundaryConditions(); + // for (ConstBoundaryIterator boundaryItr = boundaries.begin(); + // boundaryItr != boundaries.end(); + // ++boundaryItr) (*boundaryItr)->reset(db); + } +} - // Now use these flags to cull the boundary conditions. - vector numNodesRemoved(numNodeLists, 0); - for (auto* boundaryPtr: range(boundaries.begin(), boundaries.end())) { - boundaryPtr->cullGhostNodes(flags, old2newIndexMap, numNodesRemoved); - } +//------------------------------------------------------------------------------ +// Cull the ghost nodes +//------------------------------------------------------------------------------ +template +void +Integrator::cullGhostNodesFunc() { + + // Get that DataBase. + auto& db = accessDataBase(); - // Patch up the connectivity map. - db.patchConnectivityMap(flags, old2newIndexMap); + // Get the complete set of unique boundary conditions. + const auto boundaries = uniqueBoundaryConditions(); - // Now the boundary conditions have been updated, so we can go ahead and remove - // the ghost nodes themselves from the NodeLists. - for (auto [nodeListi, nodeListPtr]: enumerate(db.nodeListBegin(), db.nodeListEnd())) { - auto& nodeList = *nodeListPtr; - vector nodesToRemove; - for (auto i = nodeList.firstGhostNode(); i < nodeList.numNodes(); ++i) { - if (flags(nodeListi, i) == 0) nodesToRemove.push_back(i); + if (mRequireConnectivity and + mCullGhostNodes and + (not this->domainDecompositionIndependent()) and + (not mRequireGhostConnectivity) and + (not mRequireOverlapConnectivity)) { + const auto numNodeLists = db.numNodeLists(); + const auto& cm = db.connectivityMap(); + + // First build the set of flags indicating which nodes are used. + FieldList flags = db.newGlobalFieldList(0, "active nodes"); + for (auto [nodeListi, nodeListPtr]: enumerate(db.nodeListBegin(), db.nodeListEnd())) { + const auto& nodeList = *nodeListPtr; + for (auto i = 0u; i < nodeList.numInternalNodes(); ++i) { + flags(nodeListi, i) = 1; + const vector >& fullConnectivity = cm.connectivityForNode(&nodeList, i); + for (auto nodeListj = 0u; nodeListj < fullConnectivity.size(); ++nodeListj) { + const vector& connectivity = fullConnectivity[nodeListj]; + for (auto j: connectivity) flags(nodeListj, j) = 1; } - nodeList.deleteNodes(nodesToRemove); - nodeList.neighbor().updateNodes(); } - // All nodes should now be labeled as keepers. 
- BEGIN_CONTRACT_SCOPE - { - for (auto nodeListi = 0u; nodeListi < numNodeLists; ++nodeListi) { - ENSURE(flags[nodeListi]->numElements() == 0 or - *min_element(flags[nodeListi]->begin(), flags[nodeListi]->end()) == 1); + // Ghost nodes that are control nodes for other ghost nodes we're keeping must + // be kept as well. + const auto firstGhostNode = nodeList.firstGhostNode(); + for (auto* boundaryPtr: range(boundaries.begin(), boundaries.end())) { + const auto& boundary = *boundaryPtr; + const auto& controlNodes = boundary.controlNodes(nodeList); + const auto& ghostNodes = boundary.ghostNodes(nodeList); + // CHECK(controlNodes.size() == ghostNodes.size()); // Not true if this is a DistributedBoundary! + for (auto i: controlNodes) { + if (i >= (int)firstGhostNode) flags(nodeListi, i) = 1; + } + + // Boundary conditions are allowed to opt out of culling entirely. + if (not boundary.allowGhostCulling()) { + for (auto i: ghostNodes) flags(nodeListi, i) = 1; } } - END_CONTRACT_SCOPE + } - // The ConnectivityMap should be valid too. - ENSURE(db.connectivityMap().valid()); + // Create the index mapping from old to new node orderings. + FieldList old2newIndexMap = db.newGlobalFieldList(int(0), "index map"); + for (auto [nodeListi, nodeListPtr]: enumerate(db.nodeListBegin(), db.nodeListEnd())) { + const auto numNodes = nodeListPtr->numNodes(); + for (auto i = 0u; i < numNodes; ++i) old2newIndexMap(nodeListi, i) = i; } - // } else { + // Now use these flags to cull the boundary conditions. + vector numNodesRemoved(numNodeLists, 0); + for (auto* boundaryPtr: range(boundaries.begin(), boundaries.end())) { + boundaryPtr->cullGhostNodes(flags, old2newIndexMap, numNodesRemoved); + } - // // We're not connectivity and don't need ghost nodes, so make sure all - // // boundaries are empty. - // const vector*> boundaries = uniqueBoundaryConditions(); - // for (ConstBoundaryIterator boundaryItr = boundaries.begin(); - // boundaryItr != boundaries.end(); - // ++boundaryItr) (*boundaryItr)->reset(db); + // Patch up the connectivity map. + db.patchConnectivityMap(flags, old2newIndexMap); + + // Now the boundary conditions have been updated, so we can go ahead and remove + // the ghost nodes themselves from the NodeLists. + for (auto [nodeListi, nodeListPtr]: enumerate(db.nodeListBegin(), db.nodeListEnd())) { + auto& nodeList = *nodeListPtr; + vector nodesToRemove; + for (auto i = nodeList.firstGhostNode(); i < nodeList.numNodes(); ++i) { + if (flags(nodeListi, i) == 0) nodesToRemove.push_back(i); + } + nodeList.deleteNodes(nodesToRemove); + nodeList.neighbor().updateNodes(); + } + + // All nodes should now be labeled as keepers. + BEGIN_CONTRACT_SCOPE + { + for (auto nodeListi = 0u; nodeListi < numNodeLists; ++nodeListi) { + ENSURE(flags[nodeListi]->numElements() == 0 or + *min_element(flags[nodeListi]->begin(), flags[nodeListi]->end()) == 1); + } + } + END_CONTRACT_SCOPE + + // The ConnectivityMap should be valid too. + ENSURE(db.connectivityMap().valid()); } } diff --git a/src/Integrator/Integrator.hh b/src/Integrator/Integrator.hh index 86bb1d2ab..ac099c55f 100644 --- a/src/Integrator/Integrator.hh +++ b/src/Integrator/Integrator.hh @@ -209,6 +209,7 @@ public: // Select whether we're going to enforce culling of ghost nodes or not. bool cullGhostNodes() const; void cullGhostNodes(bool x); + void cullGhostNodesFunc(); // TODO: rename //**************************************************************************** // Methods required for restarting. 
From b8b73a6382871664884b4f71697ea4b79d940b82 Mon Sep 17 00:00:00 2001 From: Brody Richard Bassett Date: Mon, 28 Oct 2024 17:30:27 -0700 Subject: [PATCH 5/7] Revert "Added function to integrator for culling" This reverts commit b990b4dc7438f038cce32282771aba360506c504. --- src/Integrator/Integrator.cc | 167 ++++++++++++++++------------------- src/Integrator/Integrator.hh | 1 - 2 files changed, 75 insertions(+), 93 deletions(-) diff --git a/src/Integrator/Integrator.cc b/src/Integrator/Integrator.cc index bc5099e24..f0d914c45 100644 --- a/src/Integrator/Integrator.cc +++ b/src/Integrator/Integrator.cc @@ -537,112 +537,95 @@ Integrator::setGhostNodes() { db.updateConnectivityMap(mRequireGhostConnectivity, mRequireOverlapConnectivity, mRequireIntersectionConnectivity); // If we're culling ghost nodes, do it now. - cullGhostNodesFunc(); + if (mCullGhostNodes and + (not this->domainDecompositionIndependent()) and + (not mRequireGhostConnectivity) and + (not mRequireOverlapConnectivity)) { + const auto numNodeLists = db.numNodeLists(); + const auto& cm = db.connectivityMap(); + + // First build the set of flags indicating which nodes are used. + FieldList flags = db.newGlobalFieldList(0, "active nodes"); + for (auto [nodeListi, nodeListPtr]: enumerate(db.nodeListBegin(), db.nodeListEnd())) { + const auto& nodeList = *nodeListPtr; + for (auto i = 0u; i < nodeList.numInternalNodes(); ++i) { + flags(nodeListi, i) = 1; + const vector >& fullConnectivity = cm.connectivityForNode(&nodeList, i); + for (auto nodeListj = 0u; nodeListj < fullConnectivity.size(); ++nodeListj) { + const vector& connectivity = fullConnectivity[nodeListj]; + for (auto j: connectivity) flags(nodeListj, j) = 1; + } + } - // } else { + // Ghost nodes that are control nodes for other ghost nodes we're keeping must + // be kept as well. + const auto firstGhostNode = nodeList.firstGhostNode(); + for (auto* boundaryPtr: range(boundaries.begin(), boundaries.end())) { + const auto& boundary = *boundaryPtr; + const auto& controlNodes = boundary.controlNodes(nodeList); + const auto& ghostNodes = boundary.ghostNodes(nodeList); + // CHECK(controlNodes.size() == ghostNodes.size()); // Not true if this is a DistributedBoundary! + for (auto i: controlNodes) { + if (i >= (int)firstGhostNode) flags(nodeListi, i) = 1; + } + + // Boundary conditions are allowed to opt out of culling entirely. + if (not boundary.allowGhostCulling()) { + for (auto i: ghostNodes) flags(nodeListi, i) = 1; + } + } + } - // // We're not connectivity and don't need ghost nodes, so make sure all - // // boundaries are empty. - // const vector*> boundaries = uniqueBoundaryConditions(); - // for (ConstBoundaryIterator boundaryItr = boundaries.begin(); - // boundaryItr != boundaries.end(); - // ++boundaryItr) (*boundaryItr)->reset(db); - } -} + // Create the index mapping from old to new node orderings. + FieldList old2newIndexMap = db.newGlobalFieldList(int(0), "index map"); + for (auto [nodeListi, nodeListPtr]: enumerate(db.nodeListBegin(), db.nodeListEnd())) { + const auto numNodes = nodeListPtr->numNodes(); + for (auto i = 0u; i < numNodes; ++i) old2newIndexMap(nodeListi, i) = i; + } -//------------------------------------------------------------------------------ -// Cull the ghost nodes -//------------------------------------------------------------------------------ -template -void -Integrator::cullGhostNodesFunc() { - - // Get that DataBase. - auto& db = accessDataBase(); + // Now use these flags to cull the boundary conditions. 
+ vector numNodesRemoved(numNodeLists, 0); + for (auto* boundaryPtr: range(boundaries.begin(), boundaries.end())) { + boundaryPtr->cullGhostNodes(flags, old2newIndexMap, numNodesRemoved); + } - // Get the complete set of unique boundary conditions. - const auto boundaries = uniqueBoundaryConditions(); + // Patch up the connectivity map. + db.patchConnectivityMap(flags, old2newIndexMap); - if (mRequireConnectivity and - mCullGhostNodes and - (not this->domainDecompositionIndependent()) and - (not mRequireGhostConnectivity) and - (not mRequireOverlapConnectivity)) { - const auto numNodeLists = db.numNodeLists(); - const auto& cm = db.connectivityMap(); - - // First build the set of flags indicating which nodes are used. - FieldList flags = db.newGlobalFieldList(0, "active nodes"); - for (auto [nodeListi, nodeListPtr]: enumerate(db.nodeListBegin(), db.nodeListEnd())) { - const auto& nodeList = *nodeListPtr; - for (auto i = 0u; i < nodeList.numInternalNodes(); ++i) { - flags(nodeListi, i) = 1; - const vector >& fullConnectivity = cm.connectivityForNode(&nodeList, i); - for (auto nodeListj = 0u; nodeListj < fullConnectivity.size(); ++nodeListj) { - const vector& connectivity = fullConnectivity[nodeListj]; - for (auto j: connectivity) flags(nodeListj, j) = 1; + // Now the boundary conditions have been updated, so we can go ahead and remove + // the ghost nodes themselves from the NodeLists. + for (auto [nodeListi, nodeListPtr]: enumerate(db.nodeListBegin(), db.nodeListEnd())) { + auto& nodeList = *nodeListPtr; + vector nodesToRemove; + for (auto i = nodeList.firstGhostNode(); i < nodeList.numNodes(); ++i) { + if (flags(nodeListi, i) == 0) nodesToRemove.push_back(i); } + nodeList.deleteNodes(nodesToRemove); + nodeList.neighbor().updateNodes(); } - // Ghost nodes that are control nodes for other ghost nodes we're keeping must - // be kept as well. - const auto firstGhostNode = nodeList.firstGhostNode(); - for (auto* boundaryPtr: range(boundaries.begin(), boundaries.end())) { - const auto& boundary = *boundaryPtr; - const auto& controlNodes = boundary.controlNodes(nodeList); - const auto& ghostNodes = boundary.ghostNodes(nodeList); - // CHECK(controlNodes.size() == ghostNodes.size()); // Not true if this is a DistributedBoundary! - for (auto i: controlNodes) { - if (i >= (int)firstGhostNode) flags(nodeListi, i) = 1; - } - - // Boundary conditions are allowed to opt out of culling entirely. - if (not boundary.allowGhostCulling()) { - for (auto i: ghostNodes) flags(nodeListi, i) = 1; + // All nodes should now be labeled as keepers. + BEGIN_CONTRACT_SCOPE + { + for (auto nodeListi = 0u; nodeListi < numNodeLists; ++nodeListi) { + ENSURE(flags[nodeListi]->numElements() == 0 or + *min_element(flags[nodeListi]->begin(), flags[nodeListi]->end()) == 1); } } - } + END_CONTRACT_SCOPE - // Create the index mapping from old to new node orderings. - FieldList old2newIndexMap = db.newGlobalFieldList(int(0), "index map"); - for (auto [nodeListi, nodeListPtr]: enumerate(db.nodeListBegin(), db.nodeListEnd())) { - const auto numNodes = nodeListPtr->numNodes(); - for (auto i = 0u; i < numNodes; ++i) old2newIndexMap(nodeListi, i) = i; + // The ConnectivityMap should be valid too. + ENSURE(db.connectivityMap().valid()); } - // Now use these flags to cull the boundary conditions. - vector numNodesRemoved(numNodeLists, 0); - for (auto* boundaryPtr: range(boundaries.begin(), boundaries.end())) { - boundaryPtr->cullGhostNodes(flags, old2newIndexMap, numNodesRemoved); - } - - // Patch up the connectivity map. 
- db.patchConnectivityMap(flags, old2newIndexMap); - - // Now the boundary conditions have been updated, so we can go ahead and remove - // the ghost nodes themselves from the NodeLists. - for (auto [nodeListi, nodeListPtr]: enumerate(db.nodeListBegin(), db.nodeListEnd())) { - auto& nodeList = *nodeListPtr; - vector nodesToRemove; - for (auto i = nodeList.firstGhostNode(); i < nodeList.numNodes(); ++i) { - if (flags(nodeListi, i) == 0) nodesToRemove.push_back(i); - } - nodeList.deleteNodes(nodesToRemove); - nodeList.neighbor().updateNodes(); - } - - // All nodes should now be labeled as keepers. - BEGIN_CONTRACT_SCOPE - { - for (auto nodeListi = 0u; nodeListi < numNodeLists; ++nodeListi) { - ENSURE(flags[nodeListi]->numElements() == 0 or - *min_element(flags[nodeListi]->begin(), flags[nodeListi]->end()) == 1); - } - } - END_CONTRACT_SCOPE + // } else { - // The ConnectivityMap should be valid too. - ENSURE(db.connectivityMap().valid()); + // // We're not connectivity and don't need ghost nodes, so make sure all + // // boundaries are empty. + // const vector*> boundaries = uniqueBoundaryConditions(); + // for (ConstBoundaryIterator boundaryItr = boundaries.begin(); + // boundaryItr != boundaries.end(); + // ++boundaryItr) (*boundaryItr)->reset(db); } } diff --git a/src/Integrator/Integrator.hh b/src/Integrator/Integrator.hh index ac099c55f..86bb1d2ab 100644 --- a/src/Integrator/Integrator.hh +++ b/src/Integrator/Integrator.hh @@ -209,7 +209,6 @@ public: // Select whether we're going to enforce culling of ghost nodes or not. bool cullGhostNodes() const; void cullGhostNodes(bool x); - void cullGhostNodesFunc(); // TODO: rename //**************************************************************************** // Methods required for restarting. From 679d504290b4cd167f381c7a4aa363f44ad9c649 Mon Sep 17 00:00:00 2001 From: Brody Richard Bassett Date: Wed, 4 Dec 2024 14:10:13 -0800 Subject: [PATCH 6/7] Ignore connectivity in FSISPH if f_couple=0 --- src/FSISPH/SolidFSISPHEvaluateDerivatives.cc | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/FSISPH/SolidFSISPHEvaluateDerivatives.cc b/src/FSISPH/SolidFSISPHEvaluateDerivatives.cc index c6000b4b1..0cf8dae32 100644 --- a/src/FSISPH/SolidFSISPHEvaluateDerivatives.cc +++ b/src/FSISPH/SolidFSISPHEvaluateDerivatives.cc @@ -233,6 +233,10 @@ secondDerivativesLoop(const typename Dimension::Scalar time, j = pairs[kk].j_node; nodeListi = pairs[kk].i_list; nodeListj = pairs[kk].j_list; + if (pairs[kk].f_couple < 1.0e-16) + { + continue; + } // Get the state for node i. 
       const auto& interfaceFlagsi = interfaceFlags(nodeListi,i);
@@ -556,8 +560,8 @@ secondDerivativesLoop(const typename Dimension::Scalar time,
       if (freeParticle) {
         DvDti += mj*deltaDvDt;
         DvDtj -= mi*deltaDvDt;
-      } 
-      
+      }
+
       // Velocity Gradient
       //-----------------------------------------------------------
       // construct our interface velocity

From 02119b0b8ae9110814b5472a3912dd608af6ee2b Mon Sep 17 00:00:00 2001
From: Brody Richard Bassett
Date: Tue, 18 Feb 2025 14:46:36 -0800
Subject: [PATCH 7/7] Tools for density update

---
 src/FSISPH/SolidFSISPHHydroBase.cc        |  5 +-
 src/FSISPH/SolidFSISPHHydroBase.hh        |  1 +
 src/FSISPH/computeFSISPHSumMassDensity.cc |  6 ++-
 src/FSISPH/computeFSISPHSumMassDensity.hh |  3 +-
 .../computeFSISPHSumMassDensityInst.cc.py |  1 +
 src/Neighbor/ConnectivityMap.cc           | 48 +++++++++++++++++++
 src/Neighbor/ConnectivityMap.hh           |  4 +-
 src/Neighbor/NodePairList.hh              |  3 +-
 8 files changed, 65 insertions(+), 6 deletions(-)

diff --git a/src/FSISPH/SolidFSISPHHydroBase.cc b/src/FSISPH/SolidFSISPHHydroBase.cc
index 757d32724..5f6139d64 100644
--- a/src/FSISPH/SolidFSISPHHydroBase.cc
+++ b/src/FSISPH/SolidFSISPHHydroBase.cc
@@ -520,6 +520,8 @@ preStepInitialize(const DataBase<Dimension>& dataBase,
   TIME_BEGIN("SolidFSISPHpreStepInitialize");
   if (mApplySelectDensitySum){
     switch(this->densityUpdate()){
+      case FSIMassDensityMethod::FSIConsistentSumMassDensity:
+        // fallthrough intended
       case FSIMassDensityMethod::FSISumMassDensity:
       {
         const auto& W = this->kernel();
@@ -528,7 +530,8 @@ preStepInitialize(const DataBase<Dimension>& dataBase,
         const auto  mass = state.fields(HydroFieldNames::mass, 0.0);
         const auto  H = state.fields(HydroFieldNames::H, SymTensor::zero);
         auto        massDensity = state.fields(HydroFieldNames::massDensity, 0.0);
-        computeFSISPHSumMassDensity(connectivityMap, W, mSumDensityNodeLists, position, mass, H, massDensity);
+        const auto consistentSum = (this->densityUpdate() == FSIMassDensityMethod::FSIConsistentSumMassDensity);
+        computeFSISPHSumMassDensity(connectivityMap, W, mSumDensityNodeLists, position, mass, H, consistentSum, massDensity);
         for (auto boundaryItr = this->boundaryBegin(); boundaryItr < this->boundaryEnd(); ++boundaryItr) (*boundaryItr)->applyFieldListGhostBoundary(massDensity);
         for (auto boundaryItr = this->boundaryBegin(); boundaryItr < this->boundaryEnd(); ++boundaryItr) (*boundaryItr)->finalizeGhostBoundary();
         break;

diff --git a/src/FSISPH/SolidFSISPHHydroBase.hh b/src/FSISPH/SolidFSISPHHydroBase.hh
index a14519e56..9d61edfa6 100644
--- a/src/FSISPH/SolidFSISPHHydroBase.hh
+++ b/src/FSISPH/SolidFSISPHHydroBase.hh
@@ -32,6 +32,7 @@ enum class FSIMassDensityMethod {
   FSISumMassDensity = 0,
   PressureCorrectSumMassDensity = 1,
   HWeightedSumMassDensity = 2,
+  FSIConsistentSumMassDensity = 3,
 };
 
 template<typename Dimension> class State;

diff --git a/src/FSISPH/computeFSISPHSumMassDensity.cc b/src/FSISPH/computeFSISPHSumMassDensity.cc
index e44d9ca7e..f9266dd6c 100644
--- a/src/FSISPH/computeFSISPHSumMassDensity.cc
+++ b/src/FSISPH/computeFSISPHSumMassDensity.cc
@@ -14,6 +14,7 @@ computeFSISPHSumMassDensity(const ConnectivityMap<Dimension>& connectivityMap,
                             const FieldList<Dimension, typename Dimension::Vector>& position,
                             const FieldList<Dimension, typename Dimension::Scalar>& mass,
                             const FieldList<Dimension, typename Dimension::SymTensor>& H,
+                            const bool consistentSum,
                             FieldList<Dimension, typename Dimension::Scalar>& massDensity) {
 
   // Pre-conditions.
@@ -61,6 +62,7 @@ computeFSISPHSumMassDensity(const ConnectivityMap<Dimension>& connectivityMap,
       const auto& ri = position(nodeListi, i);
       const auto& rj = position(nodeListj, j);
       const auto  rij = ri - rj;
+      const auto  normalSum = (nodeListi == nodeListj) || consistentSum;
 
       if(sumDensityNodeLists[nodeListi]==1){
         const auto& Hi = H(nodeListi, i);
@@ -68,7 +70,7 @@ computeFSISPHSumMassDensity(const ConnectivityMap<Dimension>& connectivityMap,
         const auto etai = (Hi*rij).magnitude();
         const auto Wi = W.kernelValue(etai, Hdeti);
 
-        massDensity_thread(nodeListi, i) += (nodeListi == nodeListj ? mj : mi)*Wi;
+        massDensity_thread(nodeListi, i) += (normalSum ? mj : mi)*Wi;
       }
 
       if(sumDensityNodeLists[nodeListj]==1){
@@ -77,7 +79,7 @@ computeFSISPHSumMassDensity(const ConnectivityMap<Dimension>& connectivityMap,
         const auto etaj = (Hj*rij).magnitude();
         const auto Wj = W.kernelValue(etaj, Hdetj);
 
-        massDensity_thread(nodeListj, j) += (nodeListi == nodeListj ? mi : mj)*Wj;
+        massDensity_thread(nodeListj, j) += (normalSum ? mi : mj)*Wj;
       }
 
     }

diff --git a/src/FSISPH/computeFSISPHSumMassDensity.hh b/src/FSISPH/computeFSISPHSumMassDensity.hh
index 902b6c341..cee9723ab 100644
--- a/src/FSISPH/computeFSISPHSumMassDensity.hh
+++ b/src/FSISPH/computeFSISPHSumMassDensity.hh
@@ -22,9 +22,10 @@ computeFSISPHSumMassDensity(const ConnectivityMap<Dimension>& connectivityMap,
                             const FieldList<Dimension, typename Dimension::Vector>& position,
                             const FieldList<Dimension, typename Dimension::Scalar>& mass,
                             const FieldList<Dimension, typename Dimension::SymTensor>& H,
+                            const bool consistentSum,
                             FieldList<Dimension, typename Dimension::Scalar>& massDensity);
 
 }
 
- #endif
\ No newline at end of file
+ #endif

diff --git a/src/FSISPH/computeFSISPHSumMassDensityInst.cc.py b/src/FSISPH/computeFSISPHSumMassDensityInst.cc.py
index 7e6d30afc..ec820e2ab 100644
--- a/src/FSISPH/computeFSISPHSumMassDensityInst.cc.py
+++ b/src/FSISPH/computeFSISPHSumMassDensityInst.cc.py
@@ -14,6 +14,7 @@
                                      const FieldList<Dim< %(ndim)s >, Dim< %(ndim)s >::Vector>&,
                                      const FieldList<Dim< %(ndim)s >, Dim< %(ndim)s >::Scalar>&,
                                      const FieldList<Dim< %(ndim)s >, Dim< %(ndim)s >::SymTensor>&,
+                                     const bool consistentSum,
                                      FieldList<Dim< %(ndim)s >, Dim< %(ndim)s >::Scalar>&);
 }
 """

diff --git a/src/Neighbor/ConnectivityMap.cc b/src/Neighbor/ConnectivityMap.cc
index 3489dfe95..a8971db99 100644
--- a/src/Neighbor/ConnectivityMap.cc
+++ b/src/Neighbor/ConnectivityMap.cc
@@ -1152,4 +1152,52 @@ computeConnectivity() {
   TIME_END("ConnectivityMap_computeConnectivity");
 }
 
+//------------------------------------------------------------------------------
+// Remove given pairs from the connectivity in place
+//------------------------------------------------------------------------------
+template<typename Dimension>
+void
+ConnectivityMap<Dimension>::
+removeConnectivity(std::function<bool(int, int, int, int)> excludePairs)
+{
+  const auto domainDecompIndependent = NodeListRegistrar<Dimension>::instance().domainDecompositionIndependent();
+  const auto numNodeLists = mNodeLists.size();
+  for (auto iNodeList = 0u; iNodeList != numNodeLists; ++iNodeList) {
+    const auto numNodes = ((domainDecompIndependent or mBuildGhostConnectivity or mBuildOverlapConnectivity) ?
+                           mNodeLists[iNodeList]->numNodes() :
+                           mNodeLists[iNodeList]->numInternalNodes());
+    for (auto i = 0u; i < numNodes; ++i) {
+      auto& neighbors = mConnectivity[i];
+      CHECK(neighbors.size() == numNodeLists);
+      for (auto jNodeList = 0u; jNodeList < numNodeLists; ++jNodeList) {
+        auto& neighborsj = neighbors[jNodeList];
+        auto l = 0u;
+        for (auto k = 0u; k < neighborsj.size(); ++k)
+        {
+          const auto j = neighborsj[k];
+          if (!excludePairs(iNodeList, i, jNodeList, j))
+          {
+            neighborsj[l++] = neighborsj[k];
+          }
+        }
+        neighborsj.resize(l);
+      }
+    }
+  }
+
+  const auto npairs = mNodePairList.size();
+  auto l = 0u;
+  for (auto k = 0u; k < npairs; ++k) {
+    const auto iNodeList = mNodePairList[k].i_list;
+    const auto jNodeList = mNodePairList[k].j_list;
+    const auto i = mNodePairList[k].i_node;
+    const auto j = mNodePairList[k].j_node;
+    if (!excludePairs(iNodeList, i, jNodeList, j))
+    {
+      mNodePairList[l++] = mNodePairList[k];
+    }
+  }
+  mNodePairList.resize(l);
+}
+
 }

diff --git a/src/Neighbor/ConnectivityMap.hh b/src/Neighbor/ConnectivityMap.hh
index 38c9a121d..1e63fb414 100644
--- a/src/Neighbor/ConnectivityMap.hh
+++ b/src/Neighbor/ConnectivityMap.hh
@@ -63,6 +63,9 @@ public:
   // member of a pair (maintaining symmetry).
   void removeConnectivity(const FieldList<Dimension, std::vector<std::vector<int>>>& neighborsToCut);
 
+  // Remove pairs based on a function
+  void removeConnectivity(std::function<bool(int, int, int, int)> excludePairs);
+
   // Are we computing neighbors for ghosts?
   bool buildGhostConnectivity() const;
 
@@ -176,7 +179,7 @@ public:
 
   // Check that the internal data structure is valid.
   bool valid() const;
-  
 private:
   //--------------------------- Private Interface ---------------------------//
   // The set of NodeLists.

diff --git a/src/Neighbor/NodePairList.hh b/src/Neighbor/NodePairList.hh
index 06299e468..23725b5e8 100644
--- a/src/Neighbor/NodePairList.hh
+++ b/src/Neighbor/NodePairList.hh
@@ -60,7 +60,8 @@ public:
   NodePairList();
   void push_back(NodePairIdxType nodePair);
-  void clear(); 
+  void clear();
+  void resize(const size_t n) { mNodePairList.resize(n, NodePairIdxType(-1,-1,-1,-1)); }
   size_t size() const { return mNodePairList.size(); }
   void makeUnique();
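
Note: the second ConnectivityMap::removeConnectivity overload added here filters both the
per-node neighbor lists and the NodePairList after the connectivity has been computed. A rough
usage sketch (the `cm` reference, the isDamaged() helper, and the damage-based criterion are
illustrative assumptions, not part of the patch):

// Illustrative sketch only: drop every pair in which either node is flagged by a
// hypothetical isDamaged() helper, then let removeConnectivity compact the
// neighbor lists and the pair list in place.
#include <functional>

std::function<bool(int, int, int, int)> excludeDamaged =
    [&](int nodeListi, int i, int nodeListj, int j) {
      return isDamaged(nodeListi, i) or isDamaged(nodeListj, j);
    };
cm.removeConnectivity(excludeDamaged);

On the FSISPH side, selecting FSIMassDensityMethod::FSIConsistentSumMassDensity simply forces
normalSum to true for cross-material pairs, so the summed density contribution to node i always
uses the neighbor mass mj (and mi for node j) instead of switching masses across NodeLists.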