Commit 2e24e6e

coll libnbc: Remove dead code

Remove dead code that was causing warnings about unused static functions.

Signed-off-by: Brian Barrett <bbarrett@amazon.com>
1 parent 19e16d5 commit 2e24e6e

1 file changed: +0, -151 lines


ompi/mca/coll/libnbc/nbc_ineighbor_allgather.c

Lines changed: 0 additions & 151 deletions
@@ -181,157 +181,6 @@ int ompi_coll_libnbc_ineighbor_allgather(const void *sbuf, int scount, MPI_Datat
   return OMPI_SUCCESS;
 }
 
-/* better binomial bcast
- * working principle:
- * - each node gets a virtual rank vrank
- * - the 'root' node get vrank 0
- * - node 0 gets the vrank of the 'root'
- * - all other ranks stay identical (they do not matter)
- *
- * Algorithm:
- * - each node with vrank > 2^r and vrank < 2^r+1 receives from node
- *   vrank - 2^r (vrank=1 receives from 0, vrank 0 receives never)
- * - each node sends each round r to node vrank + 2^r
- * - a node stops to send if 2^r > commsize
- */
-#define RANK2VRANK(rank, vrank, root) \
-{ \
-  vrank = rank; \
-  if (rank == 0) vrank = root; \
-  if (rank == root) vrank = 0; \
-}
-#define VRANK2RANK(rank, vrank, root) \
-{ \
-  rank = vrank; \
-  if (vrank == 0) rank = root; \
-  if (vrank == root) rank = 0; \
-}
-static inline int bcast_sched_binomial(int rank, int p, int root, NBC_Schedule *schedule, void *buffer, int count, MPI_Datatype datatype) {
-  int maxr, vrank, peer, res;
-
-  maxr = (int)ceil((log((double)p)/LOG2));
-
-  RANK2VRANK(rank, vrank, root);
-
-  /* receive from the right hosts */
-  if (vrank != 0) {
-    for (int r = 0 ; r < maxr ; ++r) {
-      if ((vrank >= (1 << r)) && (vrank < (1 << (r + 1)))) {
-        VRANK2RANK(peer, vrank - (1 << r), root);
-        res = NBC_Sched_recv (buffer, false, count, datatype, peer, schedule, false);
-        if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
-          return res;
-        }
-      }
-    }
-
-    res = NBC_Sched_barrier (schedule);
-    if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
-      return res;
-    }
-  }
-
-  /* now send to the right hosts */
-  for (int r = 0 ; r < maxr ; ++r) {
-    if (((vrank + (1 << r) < p) && (vrank < (1 << r))) || (vrank == 0)) {
-      VRANK2RANK(peer, vrank + (1 << r), root);
-      res = NBC_Sched_send (buffer, false, count, datatype, peer, schedule, false);
-      if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
-        return res;
-      }
-    }
-  }
-
-  return OMPI_SUCCESS;
-}
-
-/* simple linear MPI_Ibcast */
-static inline int bcast_sched_linear(int rank, int p, int root, NBC_Schedule *schedule, void *buffer, int count, MPI_Datatype datatype) {
-  int res;
-
-  /* send to all others */
-  if(rank == root) {
-    for (int peer = 0 ; peer < p ; ++peer) {
-      if (peer != root) {
-        /* send msg to peer */
-        res = NBC_Sched_send (buffer, false, count, datatype, peer, schedule, false);
-        if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
-          return res;
-        }
-      }
-    }
-  } else {
-    /* recv msg from root */
-    res = NBC_Sched_recv (buffer, false, count, datatype, root, schedule, false);
-    if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
-      return res;
-    }
-  }
-
-  return OMPI_SUCCESS;
-}
-
-/* simple chained MPI_Ibcast */
-static inline int bcast_sched_chain(int rank, int p, int root, NBC_Schedule *schedule, void *buffer, int count, MPI_Datatype datatype, int fragsize, size_t size) {
-  int res, vrank, rpeer, speer, numfrag, fragcount, thiscount;
-  MPI_Aint ext;
-  char *buf;
-
-  RANK2VRANK(rank, vrank, root);
-  VRANK2RANK(rpeer, vrank-1, root);
-  VRANK2RANK(speer, vrank+1, root);
-  res = ompi_datatype_type_extent(datatype, &ext);
-  if (MPI_SUCCESS != res) {
-    NBC_Error("MPI Error in ompi_datatype_type_extent() (%i)", res);
-    return res;
-  }
-
-  if (count == 0) {
-    return OMPI_SUCCESS;
-  }
-
-  numfrag = count * size/fragsize;
-  if ((count * size) % fragsize != 0) {
-    numfrag++;
-  }
-
-  fragcount = count/numfrag;
-
-  for (int fragnum = 0 ; fragnum < numfrag ; ++fragnum) {
-    buf = (char *) buffer + fragnum * fragcount * ext;
-    thiscount = fragcount;
-    if (fragnum == numfrag-1) {
-      /* last fragment may not be full */
-      thiscount = count - fragcount * fragnum;
-    }
-
-    /* root does not receive */
-    if (vrank != 0) {
-      res = NBC_Sched_recv (buf, false, thiscount, datatype, rpeer, schedule, true);
-      if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
-        return res;
-      }
-    }
-
-    /* last rank does not send */
-    if (vrank != p-1) {
-      res = NBC_Sched_send (buf, false, thiscount, datatype, speer, schedule, false);
-      if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
-        return res;
-      }
-
-      /* this barrier here seems awaward but isn't!!!! */
-      if (vrank == 0) {
-        res = NBC_Sched_barrier (schedule);
-        if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
-          return res;
-        }
-      }
-    }
-  }
-
-  return OMPI_SUCCESS;
-}
 
 int ompi_coll_libnbc_neighbor_allgather_init(const void *sbuf, int scount, MPI_Datatype stype, void *rbuf,
                                              int rcount, MPI_Datatype rtype, struct ompi_communicator_t *comm,
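For context on what was deleted: the comment above the removed bcast_sched_binomial describes a virtual-rank binomial tree in which rank 0 and the root swap virtual ranks, a node with vrank in [2^r, 2^(r+1)) receives in round r from vrank - 2^r, and then forwards in later rounds to vrank + 2^r. The standalone sketch below (not part of the commit, and not libnbc code) mirrors that peer-selection logic with printf in place of NBC_Sched_send/NBC_Sched_recv; the communicator size p = 6 and root = 2 are made-up example values. Compile with something like `cc sketch.c -lm`.

/* Standalone walk-through of the deleted bcast_sched_binomial peer selection.
 * Prints, for every rank, which peer it would receive from and which peers it
 * would send to in each round.  p and root are arbitrary example inputs. */
#include <math.h>
#include <stdio.h>

/* Same virtual-rank mapping as the removed macros: rank 0 and root swap. */
#define RANK2VRANK(rank, vrank, root) \
  { (vrank) = (rank); if ((rank) == 0) (vrank) = (root); if ((rank) == (root)) (vrank) = 0; }
#define VRANK2RANK(rank, vrank, root) \
  { (rank) = (vrank); if ((vrank) == 0) (rank) = (root); if ((vrank) == (root)) (rank) = 0; }

int main(void) {
  const int p = 6, root = 2;                          /* example communicator size and root */
  const int maxr = (int)ceil(log((double)p) / log(2.0));  /* number of rounds */

  for (int rank = 0; rank < p; ++rank) {
    int vrank, peer;
    RANK2VRANK(rank, vrank, root);
    printf("rank %d (vrank %d):\n", rank, vrank);

    /* receive side: a vrank in [2^r, 2^(r+1)) receives from vrank - 2^r in round r */
    if (vrank != 0) {
      for (int r = 0; r < maxr; ++r) {
        if ((vrank >= (1 << r)) && (vrank < (1 << (r + 1)))) {
          VRANK2RANK(peer, vrank - (1 << r), root);
          printf("  round %d: recv from rank %d\n", r, peer);
        }
      }
    }

    /* send side: vrank 0 (the root) sends every round; other vranks start
     * sending once they hold the data, i.e. once vrank < 2^r, and stop when
     * the target vrank + 2^r would fall outside the communicator */
    for (int r = 0; r < maxr; ++r) {
      if (((vrank + (1 << r) < p) && (vrank < (1 << r))) || (vrank == 0)) {
        VRANK2RANK(peer, vrank + (1 << r), root);
        printf("  round %d: send to rank %d\n", r, peer);
      }
    }
  }
  return 0;
}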
