Line data Source code
1 : //------------------------------------------------------------------------------
2 : // LAGr_MaxFlow: max flow
3 : //------------------------------------------------------------------------------
4 :
5 : // LAGraph, (c) 2019-2022 by The LAGraph Contributors, All Rights Reserved.
6 : // SPDX-License-Identifier: BSD-2-Clause
7 : //
8 : // For additional details (including references to third party source code and
9 : // other files) see the LICENSE file or contact permission@sei.cmu.edu. See
10 : // Contributors.txt for a full list of contributors. Created, in part, with
11 : // funding and support from the U.S. Government (see Acknowledgments.txt file).
12 : // DM22-0790
13 :
14 : // Contributed by Darin Peries and Tim Davis, Texas A&M University
15 :
16 : //------------------------------------------------------------------------------
17 :
18 : // LAGr_MaxFlow is a GraphBLAS implementation of the push-relabel algorithm
19 : // of Baumstark et al. [1]
20 : //
21 : // [1] N. Baumstark, G. E. Blelloch, and J. Shun, "Efficient Implementation of
22 : // a Synchronous Parallel Push-Relabel Algorithm." In: Bansal, N., Finocchi, I.
23 : // (eds) Algorithms - ESA 2015. Lecture Notes in Computer Science, vol. 9294.
24 : // Springer, Berlin, Heidelberg. https://doi.org/10.1007/978-3-662-48350-3_10
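//
// A minimal calling sketch (error handling omitted; the capacity matrix A and
// the src and sink node ids are placeholders supplied by the caller):
//
//      char msg [LAGRAPH_MSG_LEN] ;
//      GrB_Matrix A = ... ;                 // n-by-n capacities, all > 0
//      GrB_Index src = ..., sink = ... ;
//      LAGraph_Graph G = NULL ;
//      LAGraph_New (&G, &A, LAGraph_ADJACENCY_DIRECTED, msg) ;
//      LAGraph_Cached_AT (G, msg) ;         // G->AT is required for directed G
//      double maxflow = 0 ;
//      GrB_Matrix F = NULL ;                // optional flow matrix, or pass NULL
//      LAGr_MaxFlow (&maxflow, &F, G, src, sink, msg) ;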
25 :
26 : #include <LAGraphX.h>
27 : #include "LG_internal.h"
28 : #include <LAGraph.h>
29 :
30 : #if LG_SUITESPARSE_GRAPHBLAS_V10
31 :
32 : //------------------------------------------------------------------------------
33 : // LG_augment_maxflow: sum current excess flow into the output flow
34 : //------------------------------------------------------------------------------
35 :
36 : #undef LG_FREE_ALL
37 : #define LG_FREE_ALL ;
38 :
39 556 : static GrB_Info LG_augment_maxflow
40 : (
41 : double *f, // total maxflow from src to sink
42 : GrB_Vector e, // excess vector, of type double
43 : GrB_Index sink, // sink node
44 : GrB_Vector src_and_sink, // mask vector, with just [src sink]
45 : GrB_Index *n_active, // # of active nodes
46 : char *msg
47 : )
48 : {
49 : // e_sink = e (sink)
50 556 : double e_sink = 0;
51 556 : GrB_Info info = GrB_Vector_extractElement(&e_sink, e, sink); // GrB_NO_VALUE if e(sink) is not present
52 556 : GRB_TRY (info) ;
53 556 : if (info == GrB_SUCCESS)
54 : {
55 : // e(sink) is present
56 528 : (*f) += e_sink;
57 : }
58 :
59 : // TODO: what if e is tiny? Do we need a tol parameter,
60 : // and replace all "e > 0" and "r > 0" comparisons with
61 : // (e > tol), throughout the code?
62 :
63 : // e<!struct([src,sink])> = select e where (e > 0)
64 556 : GRB_TRY(GrB_select(e, src_and_sink, NULL, GrB_VALUEGT_FP64, e, 0, GrB_DESC_RSC));
65 :
66 : // n_active = # of entries in e
67 556 : GRB_TRY(GrB_Vector_nvals(n_active, e));
68 556 : return (GrB_SUCCESS) ;
69 : }
70 :
71 : //------------------------------------------------------------------------------
72 : // LG_global_relabel: global relabeling, based on a BFS from the sink node
73 : //------------------------------------------------------------------------------
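// (The heights are recomputed from a BFS that starts at the sink and follows
// residual edges in reverse, so d(i) becomes the residual-graph distance from
// node i to the sink.  Nodes that cannot reach the sink are given height n,
// while the src and sink keep their fixed heights of n and 0.)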
74 :
75 : #undef LG_FREE_WORK
76 : #define LG_FREE_WORK \
77 : { \
78 : GrB_free(&C); \
79 : GrB_free(&T); \
80 : LAGraph_Delete(&G2, msg); \
81 : }
82 :
83 : #undef LG_FREE_ALL
84 : #define LG_FREE_ALL LG_FREE_WORK
85 :
86 42 : static GrB_Info LG_global_relabel
87 : (
88 : // inputs:
89 : GrB_Matrix R, // flow matrix
90 : GrB_Index sink, // sink node
91 : GrB_Vector src_and_sink, // mask vector, with just [src sink]
92 : GrB_UnaryOp GetResidual, // unary op to compute resid=capacity-flow
93 : // input/output:
94 : GrB_Vector d, // d(i) = height/label of node i
95 : // outputs:
96 : GrB_Vector *lvl, // lvl(i) = distance of node i from sink, if reachable
97 : char *msg
98 : )
99 : {
100 42 : GrB_Matrix C = NULL, T = NULL ;
101 42 : LAGraph_Graph G2 = NULL ;
102 : GrB_Index n ;
103 42 : GRB_TRY(GrB_Matrix_nrows(&n, R)) ;
104 42 : GRB_TRY(GrB_Matrix_new(&T, GrB_FP64, n, n));
105 42 : GRB_TRY(GrB_Matrix_new(&C, GrB_FP64, n, n));
106 : // C = GetResidual (R), computing the residual of each edge
107 42 : GRB_TRY(GrB_apply(C, NULL, NULL, GetResidual, R, NULL)) ;
108 : // prune zeros and negative entries from C
109 42 : GRB_TRY(GrB_select(C, NULL, NULL, GrB_VALUEGT_FP64, C, 0, NULL)) ;
110 : // T = C'
111 42 : GRB_TRY(GrB_transpose(T, NULL, NULL, C, NULL));
112 : // construct G2 and its cached transpose and outdegree
113 42 : LG_TRY(LAGraph_New(&G2, &T, LAGraph_ADJACENCY_DIRECTED, msg));
114 42 : G2->AT = C ;
115 42 : C = NULL ;
116 42 : LG_TRY(LAGraph_Cached_OutDegree(G2, msg));
117 : // compute lvl using bfs on G2, starting at sink node
118 42 : LG_TRY(LAGr_BreadthFirstSearch(lvl, NULL, G2, sink, msg));
119 : // d<!struct([src,sink])> = lvl
120 42 : GRB_TRY(GrB_assign(d, src_and_sink, NULL, *lvl, GrB_ALL, n, GrB_DESC_SC));
121 : // d<!struct(lvl)> = n
122 42 : GRB_TRY(GrB_assign(d, *lvl, NULL, n, GrB_ALL, n, GrB_DESC_SC));
123 42 : LG_FREE_WORK ;
124 42 : return (GrB_SUCCESS) ;
125 : }
126 :
127 : //------------------------------------------------------------------------------
128 :
129 : #undef LG_FREE_WORK
130 : #define LG_FREE_WORK \
131 : { \
132 : GrB_free(&FlowEdge); \
133 : GrB_free(&CompareTuple); \
134 : GrB_free(&ResultTuple); \
135 : GrB_free(&e); \
136 : GrB_free(&d); \
137 : GrB_free(&theta); \
138 : GrB_free(&R); \
139 : GrB_free(&Delta); \
140 : GrB_free(&delta_vec); \
141 : GrB_free(&Map); \
142 : GrB_free(&y); \
143 : GrB_free(&yd); \
144 : GrB_free(&src_and_sink); \
145 : GrB_free(&Jvec); \
146 : GrB_free(&Prune); \
147 : GrB_free(&UpdateFlow); \
148 : GrB_free(&Relabel); \
149 : GrB_free(&ResidualFlow); \
150 : GrB_free(&MxeIndexMult); \
151 : GrB_free(&MxeMult); \
152 : GrB_free(&MxeAdd); \
153 : GrB_free(&MxeAddMonoid); \
154 : GrB_free(&MxeSemiring); \
155 : GrB_free(&ExtractJ); \
156 : GrB_free(&CreateCompareVec); \
157 : GrB_free(&RxdSemiring); \
158 : GrB_free(&RxdAdd); \
159 : GrB_free(&RxdAddMonoid); \
160 : GrB_free(&RxdIndexMult); \
161 : GrB_free(&RxdMult); \
162 : GrB_free(&InitForw); \
163 : GrB_free(&InitBack); \
164 : GrB_free(&ResidualForward); \
165 : GrB_free(&ResidualBackward); \
166 : GrB_free(&zero); \
167 : GrB_free(&empty); \
168 : GrB_free(&t); \
169 : GrB_free(&invariant); \
170 : GrB_free(&CheckInvariant); \
171 : GrB_free(&check); \
172 : GrB_free(&ExtractYJ); \
173 : GrB_free(&desc); \
174 : GrB_free(&MakeFlow); \
175 : GrB_free(&GetResidual); \
176 : GrB_free(&lvl) ; \
177 : GrB_free(&ExtractMatrixFlow); \
178 : }
179 :
180 : #undef LG_FREE_ALL
181 : #define LG_FREE_ALL \
182 : { \
183 : LG_FREE_WORK ; \
184 : GrB_free(flow_mtx) ; \
185 : }
186 :
187 : // helper macro for creating types and operators
188 : #define JIT_STR(f, var) char* var = #f; f
189 :
190 : // casting for unary ops
191 : #define F_UNARY(f) ((void (*)(void *, const void *))f)
192 :
193 : // casting for index binary ops
194 : #define F_INDEX_BINARY(f) ((void (*)(void*, const void*, GrB_Index, GrB_Index, const void *, GrB_Index, GrB_Index, const void *)) f)
195 :
196 : // casting for binary op
197 : #define F_BINARY(f) ((void (*)(void *, const void *, const void *)) f)
198 :
199 : //------------------------------------------------------------------------------
200 : // custom types
201 : //------------------------------------------------------------------------------
202 :
203 : // type of the R matrix: LG_MF_flowEdge (FlowEdge)
204 : JIT_STR(typedef struct{
205 : double capacity; /* original edge weight A(i,j), always positive */
206 : double flow; /* current flow along this edge (i,j); can be negative */
207 : } LG_MF_flowEdge;, FLOWEDGE_STR)
208 :
209 : // type of the y vector for y = R*d: LG_MF_resultTuple64/32 (ResultTuple)
210 : JIT_STR(typedef struct{
211 : double residual; /* residual = capacity - flow for the edge (i,j) */
212 : int64_t d; /* d(j) of the target node j */
213 : int64_t j; /* node id of the target node j */
214 : } LG_MF_resultTuple64;, RESULTTUPLE_STR64)
215 : JIT_STR(typedef struct{
216 : double residual; /* residual = capacity - flow for the edge (i,j) */
217 : int32_t d; /* d(j) of the target node j */
218 : int32_t j; /* node id of the target node j */
219 : } LG_MF_resultTuple32;, RESULTTUPLE_STR32)
220 :
221 : // type of the Map matrix and yd vector: LG_MF_compareTuple64/32 (CompareTuple)
222 : JIT_STR(typedef struct{
223 : double residual; /* residual = capacity - flow for the edge (i,j) */
224 : int64_t di; /* d(i) for node i */
225 : int64_t dj; /* d(j) for node j */
226 : int64_t j; /* node id for node j */
227 : } LG_MF_compareTuple64;, COMPARETUPLE_STR64)
228 : JIT_STR(typedef struct{
229 : double residual; /* residual = capacity - flow for the edge (i,j) */
230 : int32_t di; /* d(i) for node i */
231 : int32_t dj; /* d(j) for node j */
232 : int32_t j; /* node id for node j */
233 : int32_t unused; /* to pad the struct to 24 bytes */
234 : } LG_MF_compareTuple32;, COMPARETUPLE_STR32) // 24 bytes: padded
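// (The 32-bit tuple variants are used when n fits in 32 bits, and the 64-bit
// variants otherwise; see the selection of Integer_Type in LAGr_MaxFlow below.)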
235 :
236 : //------------------------------------------------------------------------------
237 : // unary ops to create R from input adjacency matrix G->A and G->AT
238 : //------------------------------------------------------------------------------
239 :
240 : // unary op for R = ResidualForward (A)
241 11032 : JIT_STR(void LG_MF_ResidualForward(LG_MF_flowEdge *z, const double *y) {
242 : z->capacity = (*y);
243 : z->flow = 0;
244 : }, CRF_STR)
245 :
246 : // unary op for R<!struct(A)> = ResidualBackward (AT)
247 11032 : JIT_STR(void LG_MF_ResidualBackward(LG_MF_flowEdge *z, const double *y) {
248 : z->capacity = 0;
249 : z->flow = 0;
250 : }, CRB_STR)
251 :
252 : //------------------------------------------------------------------------------
253 : // R*d semiring
254 : //------------------------------------------------------------------------------
255 :
256 : // multiplicative operator, z = R(i,j) * d(j), 64-bit case
257 1436 : JIT_STR(void LG_MF_RxdMult64(LG_MF_resultTuple64 *z,
258 : const LG_MF_flowEdge *x, GrB_Index i, GrB_Index j,
259 : const int64_t *y, GrB_Index iy, GrB_Index jy,
260 : const bool* theta) {
261 : double r = x->capacity - x->flow;
262 : if(r > 0){
263 : z->residual = r;
264 : z->d = (*y);
265 : z->j = j;
266 : }
267 : else{
268 : z->residual = 0;
269 : z->d = INT64_MAX;
270 : z->j = -1;
271 : }
272 : }, RXDMULT_STR64)
273 :
274 : // multiplicative operator, z = R(i,j) * d(j), 32-bit case
275 752 : JIT_STR(void LG_MF_RxdMult32(LG_MF_resultTuple32 *z,
276 : const LG_MF_flowEdge *x, GrB_Index i, GrB_Index j,
277 : const int32_t *y, GrB_Index iy, GrB_Index jy,
278 : const bool* theta) {
279 : double r = x->capacity - x->flow;
280 : if(r > 0){
281 : z->residual = r;
282 : z->d = (*y);
283 : z->j = j;
284 : }
285 : else{
286 : z->residual = 0;
287 : z->d = INT32_MAX;
288 : z->j = -1;
289 : }
290 : }, RXDMULT_STR32)
291 :
292 : // additive monoid: z = the better of x and y: the smaller height d wins, with ties broken by the larger residual, then the larger node id j (64-bit case)
293 1256 : JIT_STR(void LG_MF_RxdAdd64(LG_MF_resultTuple64 * z,
294 : const LG_MF_resultTuple64 * x,
295 : const LG_MF_resultTuple64 * y) {
296 : if(x->d < y->d){
297 : (*z) = (*x) ;
298 : }
299 : else if(x->d > y->d){
300 : (*z) = (*y) ;
301 : }
302 : else{
303 : if(x->residual > y->residual){
304 : (*z) = (*x) ;
305 : }
306 : else if(x->residual < y->residual){
307 : (*z) = (*y) ;
308 : }
309 : else{
310 : if(x->j > y->j){
311 : (*z) = (*x);
312 : }
313 : else{
314 : (*z) = (*y) ;
315 : }
316 : }
317 : }
318 : }, RXDADD_STR64)
319 :
320 : // additive monoid: z = the better of x and y, chosen as in the 64-bit case above (32-bit case)
321 464 : JIT_STR(void LG_MF_RxdAdd32(LG_MF_resultTuple32 * z,
322 : const LG_MF_resultTuple32 * x, const LG_MF_resultTuple32 * y) {
323 : if(x->d < y->d){
324 : (*z) = (*x) ;
325 : }
326 : else if(x->d > y->d){
327 : (*z) = (*y) ;
328 : }
329 : else{
330 : if(x->residual > y->residual){
331 : (*z) = (*x) ;
332 : }
333 : else if(x->residual < y->residual){
334 : (*z) = (*y) ;
335 : }
336 : else{
337 : if(x->j > y->j){
338 : (*z) = (*x);
339 : }
340 : else{
341 : (*z) = (*y) ;
342 : }
343 : }
344 : }
345 : }, RXDADD_STR32)
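// For example, with x = (residual 2, d 3, j 7) and y = (residual 5, d 3, j 4),
// the heights tie, the larger residual wins, and z = y; the node id j is only
// used as the final tie-breaker so that the result is deterministic.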
346 :
347 : //------------------------------------------------------------------------------
348 : // unary ops for delta_vec = ResidualFlow (y)
349 : //------------------------------------------------------------------------------
350 :
351 180 : JIT_STR(void LG_MF_ResidualFlow64(double *z, const LG_MF_resultTuple64 *x)
352 : { (*z) = x->residual; }, RESIDUALFLOW_STR64)
353 :
354 284 : JIT_STR(void LG_MF_ResidualFlow32(double *z, const LG_MF_resultTuple32 *x)
355 : { (*z) = x->residual; }, RESIDUALFLOW_STR32)
356 :
357 : //------------------------------------------------------------------------------
358 : // binary op for R<Delta> = UpdateFlow (R, Delta) using eWiseMult
359 : //------------------------------------------------------------------------------
360 :
361 928 : JIT_STR(void LG_MF_UpdateFlow(LG_MF_flowEdge *z,
362 : const LG_MF_flowEdge *x, const double *y) {
363 : z->capacity = x->capacity;
364 : z->flow = x->flow + (*y);
365 : }, UPDATEFLOW_STR)
366 :
367 : //------------------------------------------------------------------------------
368 : // binary op for d<struct(y)> = Relabel (d, y) using eWiseMult
369 : //------------------------------------------------------------------------------
370 :
371 180 : JIT_STR(void LG_MF_Relabel64(int64_t *z,
372 : const int64_t *x, const LG_MF_resultTuple64 *y) {
373 : if((*x) < y->d+1){
374 : (*z) = y->d + 1;
375 : }
376 : else {
377 : (*z) = (*x);
378 : }
379 : }, RELABEL_STR64)
380 :
381 284 : JIT_STR(void LG_MF_Relabel32(int32_t *z,
382 : const int32_t *x, const LG_MF_resultTuple32 *y) {
383 : if((*x) < y->d+1){
384 : (*z) = y->d + 1;
385 : }
386 : else {
387 : (*z) = (*x);
388 : }
389 : }, RELABEL_STR32)
390 :
391 : //------------------------------------------------------------------------------
392 : // unary op for Jvec = ExtractJ (yd), where Jvec(i) = yd(i)->j
393 : //------------------------------------------------------------------------------
394 :
395 180 : JIT_STR(void LG_MF_ExtractJ64(int64_t *z, const LG_MF_compareTuple64 *x) { (*z) = x->j; }, EXTRACTJ_STR64)
396 :
397 288 : JIT_STR(void LG_MF_ExtractJ32(int32_t *z, const LG_MF_compareTuple32 *x) { (*z) = x->j; }, EXTRACTJ_STR32)
398 :
399 : //------------------------------------------------------------------------------
400 : // unary op for Jvec = ExtractYJ (y), where Jvec(i) = y(i)->j
401 : //------------------------------------------------------------------------------
402 :
403 180 : JIT_STR(void LG_MF_ExtractYJ64(int64_t *z, const LG_MF_resultTuple64 *x) { (*z) = x->j; }, EXTRACTYJ_STR64)
404 :
405 284 : JIT_STR(void LG_MF_ExtractYJ32(int32_t *z, const LG_MF_resultTuple32 *x) { (*z) = x->j; }, EXTRACTYJ_STR32)
406 :
407 : //------------------------------------------------------------------------------
408 : // binary op for R(src,:) = InitForw (R (src,:), t')
409 : //------------------------------------------------------------------------------
410 :
411 32 : JIT_STR(void LG_MF_InitForw(LG_MF_flowEdge * z,
412 : const LG_MF_flowEdge * x, const LG_MF_flowEdge * y){
413 : z->capacity = x->capacity;
414 : z->flow = y->flow + x->flow;
415 : }, INITFORW_STR)
416 :
417 : //------------------------------------------------------------------------------
418 : // binary op for R(:,src) = InitBack (R (:,src), t)
419 : //------------------------------------------------------------------------------
420 :
421 32 : JIT_STR(void LG_MF_InitBack(LG_MF_flowEdge * z,
422 : const LG_MF_flowEdge * x, const LG_MF_flowEdge * y){
423 : z->capacity = x->capacity;
424 : z->flow = x->flow - y->flow;
425 : }, INITBACK_STR)
426 :
427 : //------------------------------------------------------------------------------
428 : // y = Map*e semiring
429 : //------------------------------------------------------------------------------
430 :
431 : // multiplicative operator, z = Map(i,j)*e(j), 64-bit case
432 180 : JIT_STR(void LG_MF_MxeMult64(LG_MF_resultTuple64 * z,
433 : const LG_MF_compareTuple64 * x, GrB_Index i, GrB_Index j,
434 : const double * y, GrB_Index iy, GrB_Index jy,
435 : const bool* theta){
436 : bool j_active = ((*y) > 0) ;
437 : if ((x->di < x->dj-1) /* case a */
438 : || (x->di == x->dj-1 && !j_active) /* case b */
439 : || (x->di == x->dj && (!j_active || (j_active && (i < j)))) /* case c */
440 : || (x->di == x->dj+1)) /* case d */
441 : {
442 : z->residual = x->residual;
443 : z->d = x->dj;
444 : z->j = x->j;
445 : }
446 : else
447 : {
448 : z->residual = 0;
449 : z->d = INT64_MAX;
450 : z->j = -1;
451 : }
452 : }, MXEMULT_STR64)
453 :
454 : // multiplicative operator, z = Map(i,j)*e(j), 32-bit case
455 288 : JIT_STR(void LG_MF_MxeMult32(LG_MF_resultTuple32 * z,
456 : const LG_MF_compareTuple32 * x, GrB_Index i, GrB_Index j,
457 : const double * y, GrB_Index iy, GrB_Index jy,
458 : const bool* theta){
459 : bool j_active = ((*y) > 0) ;
460 : if ((x->di < x->dj-1) /* case a */
461 : || (x->di == x->dj-1 && !j_active) /* case b */
462 : || (x->di == x->dj && (!j_active || (j_active && (i < j)))) /* case c */
463 : || (x->di == x->dj+1)) /* case d */
464 : {
465 : z->residual = x->residual;
466 : z->d = x->dj;
467 : z->j = x->j;
468 : }
469 : else
470 : {
471 : z->residual = 0;
472 : z->d = INT32_MAX;
473 : z->j = -1;
474 : }
475 : }, MXEMULT_STR32)
476 :
477 : // Note: the additive monoid is not actually used in the call to GrB_mxv below,
478 : // because any given node only pushes to one neighbor at a time. As a result,
479 : // no reduction is needed in GrB_mxv. The semiring still needs a monoid,
480 : // however.
481 4 : JIT_STR(void LG_MF_MxeAdd64(LG_MF_resultTuple64 * z,
482 : const LG_MF_resultTuple64 * x, const LG_MF_resultTuple64 * y){
483 : (*z) = (*y) ;
484 : }, MXEADD_STR64)
485 :
486 10 : JIT_STR(void LG_MF_MxeAdd32(LG_MF_resultTuple32 * z,
487 : const LG_MF_resultTuple32 * x, const LG_MF_resultTuple32 * y){
488 : (*z) = (*y) ;
489 : }, MXEADD_STR32)
490 :
491 : //------------------------------------------------------------------------------
492 : // binary op for yd = CreateCompareVec (y,d) using eWiseMult
493 : //------------------------------------------------------------------------------
494 :
495 180 : JIT_STR(void LG_MF_CreateCompareVec64(LG_MF_compareTuple64 *comp,
496 : const LG_MF_resultTuple64 *res, const int64_t *height) {
497 : comp->di = (*height);
498 : comp->residual = res->residual;
499 : comp->dj = res->d;
500 : comp->j = res->j;
501 : }, CREATECOMPAREVEC_STR64)
502 :
503 288 : JIT_STR(void LG_MF_CreateCompareVec32(LG_MF_compareTuple32 *comp,
504 : const LG_MF_resultTuple32 *res, const int32_t *height) {
505 : comp->di = (*height);
506 : comp->residual = res->residual;
507 : comp->dj = res->d;
508 : comp->j = res->j;
509 : comp->unused = 0 ;
510 : }, CREATECOMPAREVEC_STR32)
511 :
512 : //------------------------------------------------------------------------------
513 : // index unary op to remove empty tuples from y (for which y->j is -1)
514 : //------------------------------------------------------------------------------
515 :
516 686 : JIT_STR(void LG_MF_Prune64(bool * z, const LG_MF_resultTuple64 * x,
517 : GrB_Index ix, GrB_Index jx, const bool * theta){
518 : *z = (x->j != -1) ;
519 : }, PRUNE_STR64)
520 :
521 928 : JIT_STR(void LG_MF_Prune32(bool * z, const LG_MF_resultTuple32 * x,
522 : GrB_Index ix, GrB_Index jx, const bool * theta){
523 : *z = (x->j != -1) ;
524 : }, PRUNE_STR32)
525 :
526 : //------------------------------------------------------------------------------
527 : // unary op for t = MakeFlow (e), where t(i) = (0, e(i))
528 : //------------------------------------------------------------------------------
529 :
530 34 : JIT_STR(void LG_MF_MakeFlow(LG_MF_flowEdge * flow_edge, const double * flow){
531 : flow_edge->capacity = 0;
532 : flow_edge->flow = (*flow);
533 : }, MAKEFLOW_STR)
534 :
535 : //------------------------------------------------------------------------------
536 : // binary op CheckInvariant to check invariants (debugging only)
537 : //------------------------------------------------------------------------------
538 :
539 : #ifdef DBG
540 : JIT_STR(void LG_MF_CheckInvariant64(bool *z, const int64_t *height,
541 : const LG_MF_resultTuple64 *result) {
542 : (*z) = ((*height) == result->d+1);
543 : }, CHECKINVARIANT_STR64)
544 :
545 : JIT_STR(void LG_MF_CheckInvariant32(bool *z, const int32_t *height,
546 : const LG_MF_resultTuple32 *result) {
547 : (*z) = ((*height) == result->d+1);
548 : }, CHECKINVARIANT_STR32)
549 : #endif
550 :
551 : //------------------------------------------------------------------------------
552 : // unary op for C = GetResidual (R), computing the residual of each edge
553 : //------------------------------------------------------------------------------
554 :
555 12816 : JIT_STR(void LG_MF_GetResidual(double * res, const LG_MF_flowEdge * flow_edge){
556 : (*res) = flow_edge->capacity - flow_edge->flow;
557 : }, GETRESIDUAL_STR)
558 :
559 : //------------------------------------------------------------------------------
560 : // unary op for flow_mtx = ExtractMatrixFlow (R)
561 : //------------------------------------------------------------------------------
562 :
563 5778 : JIT_STR(void LG_MF_ExtractMatrixFlow(double* flow, const LG_MF_flowEdge* edge){*flow = edge->flow;}, EMFLOW_STR)
564 :
565 : #endif
566 :
567 : //------------------------------------------------------------------------------
568 : // LAGr_MaxFlow
569 : //------------------------------------------------------------------------------
570 :
571 28 : int LAGr_MaxFlow
572 : (
573 : // output:
574 : double *f, // max flow from src node to sink node
575 : GrB_Matrix *flow_mtx, // optional output flow matrix
576 : // input:
577 : LAGraph_Graph G, // graph to compute maxflow on
578 : GrB_Index src, // source node
579 : GrB_Index sink, // sink node
580 : char *msg
581 : )
582 : {
583 :
584 : #if LG_SUITESPARSE_GRAPHBLAS_V10
585 :
586 : //----------------------------------------------------------------------------
587 : // declare variables
588 : //----------------------------------------------------------------------------
589 :
590 : // types
591 28 : GrB_Type FlowEdge = NULL ;
592 28 : GrB_Type ResultTuple = NULL ;
593 28 : GrB_Type CompareTuple = NULL ;
594 :
595 28 : GrB_Vector lvl = NULL ;
596 28 : GrB_UnaryOp GetResidual = NULL ;
597 :
598 : // to create R
599 28 : GrB_UnaryOp ResidualForward = NULL, ResidualBackward = NULL ;
600 28 : GrB_Matrix R = NULL ;
601 :
602 : // to initialize R with initial saturated flows
603 28 : GrB_Vector e = NULL, t = NULL ;
604 28 : GrB_UnaryOp MakeFlow = NULL ;
605 28 : GrB_BinaryOp InitForw = NULL, InitBack = NULL ;
606 :
607 : // height/label vector
608 28 : GrB_Vector d = NULL ;
609 :
610 : // src and sink mask vector and n_active
611 28 : GrB_Vector src_and_sink = NULL ;
612 28 : GrB_Index n_active = INT64_MAX ;
613 :
614 : // semiring and vectors for y<struct(e)> = R x d
615 28 : GrB_Vector y = NULL ;
616 28 : GrB_IndexUnaryOp Prune = NULL ;
617 28 : GxB_IndexBinaryOp RxdIndexMult = NULL ;
618 28 : GrB_BinaryOp RxdAdd = NULL, RxdMult = NULL ;
619 28 : GrB_Monoid RxdAddMonoid = NULL ;
620 28 : GrB_Semiring RxdSemiring = NULL ;
621 28 : GrB_Scalar theta = NULL ;
622 :
623 : // binary op and yd
624 28 : GrB_Vector yd = NULL ;
625 28 : GrB_BinaryOp CreateCompareVec = NULL ;
626 :
627 : // utility vectors, Matrix, and ops for mapping
628 28 : GrB_Matrix Map = NULL ;
629 28 : GrB_Vector Jvec = NULL ;
630 28 : GrB_UnaryOp ExtractJ = NULL, ExtractYJ = NULL ;
631 :
632 : // Map*e semiring
633 28 : GrB_Semiring MxeSemiring = NULL ;
634 28 : GrB_Monoid MxeAddMonoid = NULL ;
635 28 : GrB_BinaryOp MxeAdd = NULL, MxeMult = NULL ;
636 28 : GxB_IndexBinaryOp MxeIndexMult = NULL ;
637 :
638 : // to extract the residual flow
639 28 : GrB_UnaryOp ResidualFlow = NULL ;
640 28 : GrB_UnaryOp ExtractMatrixFlow = NULL ;
641 :
642 : // Delta structures
643 28 : GrB_Vector delta_vec = NULL ;
644 28 : GrB_Matrix Delta = NULL ;
645 :
646 : // update height
647 28 : GrB_BinaryOp Relabel = NULL ;
648 :
649 : // update R structure
650 28 : GrB_BinaryOp UpdateFlow = NULL ;
651 :
652 : // scalars
653 28 : GrB_Scalar zero = NULL ;
654 28 : GrB_Scalar empty = NULL ;
655 :
656 : // invariant (for debugging only)
657 28 : GrB_Vector invariant = NULL ;
658 28 : GrB_BinaryOp CheckInvariant = NULL ;
659 28 : GrB_Scalar check = NULL ;
660 : bool check_raw;
661 :
662 : // descriptor for matrix building
663 28 : GrB_Descriptor desc = NULL ;
664 :
665 : //----------------------------------------------------------------------------
666 : // check inputs
667 : //----------------------------------------------------------------------------
668 :
669 28 : if (flow_mtx != NULL)
670 : {
671 14 : (*flow_mtx) = NULL ;
672 : }
673 28 : LG_TRY(LAGraph_CheckGraph(G, msg));
674 28 : LG_ASSERT (f != NULL, GrB_NULL_POINTER) ;
675 28 : (*f) = 0;
676 : GrB_Index nrows, n;
677 28 : GRB_TRY(GrB_Matrix_ncols(&n, G->A));
678 28 : GRB_TRY(GrB_Matrix_nrows(&nrows, G->A));
679 28 : LG_ASSERT_MSG(nrows == n, GrB_INVALID_VALUE, "Matrix must be square");
680 28 : LG_ASSERT_MSG(src < n && src >= 0 && sink < n && sink >= 0,
681 : GrB_INVALID_VALUE, "src and sink must be in the range [0, n)");
682 28 : LG_ASSERT_MSG(G->emin > 0, GrB_INVALID_VALUE,
683 : "the edge weights (capacities) must be greater than 0");
684 :
685 : // get the adjacency matrix and its transpose
686 28 : GrB_Matrix A = G->A;
687 28 : GrB_Matrix AT = NULL ;
688 28 : if (G->kind == LAGraph_ADJACENCY_UNDIRECTED)
689 : {
690 : // G is undirected, so A and AT are the same
691 4 : AT = G->A ;
692 : }
693 : else
694 : {
695 : // G is directed; get G->AT, which must be present
696 24 : AT = G->AT ;
697 24 : LG_ASSERT_MSG (AT != NULL, LAGRAPH_NOT_CACHED, "G->AT is required") ;
698 : }
699 :
700 : //----------------------------------------------------------------------------
701 : // create types, operators, matrices, and vectors
702 : //----------------------------------------------------------------------------
703 :
704 : // create types for computation
705 28 : GRB_TRY(GxB_Type_new(&FlowEdge, sizeof(LG_MF_flowEdge), "LG_MF_flowEdge", FLOWEDGE_STR));
706 :
707 28 : GRB_TRY(GxB_UnaryOp_new(&GetResidual, F_UNARY(LG_MF_GetResidual), GrB_FP64, FlowEdge,
708 : "LG_MF_GetResidual", GETRESIDUAL_STR));
709 :
710 : #ifdef DBG
711 : GRB_TRY(GrB_Scalar_new(&check, GrB_BOOL));
712 : GRB_TRY(GrB_Scalar_setElement(check, false));
713 : GRB_TRY(GrB_Vector_new(&invariant, GrB_BOOL, n));
714 : #endif
715 :
716 : // ops to create R from A
717 28 : GRB_TRY(GxB_UnaryOp_new(&ResidualForward,
718 : F_UNARY(LG_MF_ResidualForward), FlowEdge , GrB_FP64,
719 : "LG_MF_ResidualForward", CRF_STR));
720 28 : GRB_TRY(GxB_UnaryOp_new(&ResidualBackward,
721 : F_UNARY(LG_MF_ResidualBackward), FlowEdge , GrB_FP64,
722 : "LG_MF_ResidualBackward", CRB_STR));
723 :
724 : // ops to initialize R with initial saturated flows from the source node
725 28 : GRB_TRY(GxB_BinaryOp_new(&InitForw,
726 : F_BINARY(LG_MF_InitForw), FlowEdge, FlowEdge, FlowEdge,
727 : "LG_MF_InitForw", INITFORW_STR));
728 28 : GRB_TRY(GxB_BinaryOp_new(&InitBack,
729 : F_BINARY(LG_MF_InitBack), FlowEdge, FlowEdge, FlowEdge,
730 : "LG_MF_InitBack", INITBACK_STR));
731 28 : GRB_TRY(GxB_UnaryOp_new(&MakeFlow, F_UNARY(LG_MF_MakeFlow), FlowEdge, GrB_FP64,
732 : "LG_MF_MakeFlow", MAKEFLOW_STR));
733 :
734 : // construct [src,sink] mask
735 28 : GRB_TRY(GrB_Vector_new(&src_and_sink, GrB_BOOL, n));
736 28 : GRB_TRY (GrB_Vector_setElement (src_and_sink, true, sink)) ;
737 28 : GRB_TRY (GrB_Vector_setElement (src_and_sink, true, src)) ;
738 :
739 : // create delta vector and Delta matrix
740 28 : GRB_TRY(GrB_Matrix_new(&Delta, GrB_FP64, n, n));
741 28 : GRB_TRY(GrB_Vector_new(&delta_vec, GrB_FP64, n));
742 :
743 : // operator to update R structure
744 28 : GRB_TRY(GxB_BinaryOp_new(&UpdateFlow,
745 : F_BINARY(LG_MF_UpdateFlow), FlowEdge, FlowEdge, GrB_FP64,
746 : "LG_MF_UpdateFlow", UPDATEFLOW_STR));
747 :
748 : // create scalars
749 28 : GRB_TRY(GrB_Scalar_new(&zero, GrB_FP64));
750 28 : GRB_TRY(GrB_Scalar_setElement(zero, 0));
751 28 : GRB_TRY(GrB_Scalar_new (&empty, GrB_FP64)) ;
752 28 : GRB_TRY(GrB_Scalar_new(&theta, GrB_BOOL)); // unused placeholder
753 28 : GRB_TRY(GrB_Scalar_setElement_BOOL(theta, false));
754 :
755 : // create op for optional output flow_mtx
756 28 : if (flow_mtx != NULL)
757 : {
758 14 : GRB_TRY(GxB_UnaryOp_new(&ExtractMatrixFlow,
759 : F_UNARY(LG_MF_ExtractMatrixFlow), GrB_FP64, FlowEdge,
760 : "LG_MF_ExtractMatrixFlow", EMFLOW_STR));
761 : }
762 :
763 : //----------------------------------------------------------------------------
764 : // determine the integer type to use for the problem
765 : //----------------------------------------------------------------------------
766 :
767 28 : GrB_Type Integer_Type = NULL ;
768 :
769 : #ifdef COVERAGE
770 : // Just for test coverage, use 64-bit ints for n > 100. Do not use this
771 : // rule in production!
772 : #define NBIG 100
773 : #else
774 : // For production use: 64-bit integers if n > 2^31
775 : #define NBIG INT32_MAX
776 : #endif
777 28 : if (n > NBIG){
778 :
779 : //--------------------------------------------------------------------------
780 : // use 64-bit integers
781 : //--------------------------------------------------------------------------
782 :
783 8 : Integer_Type = GrB_INT64 ;
784 :
785 : // create types for computation
786 8 : GRB_TRY(GxB_Type_new(&ResultTuple, sizeof(LG_MF_resultTuple64),
787 : "LG_MF_resultTuple64", RESULTTUPLE_STR64));
788 8 : GRB_TRY(GxB_Type_new(&CompareTuple, sizeof(LG_MF_compareTuple64),
789 : "LG_MF_compareTuple64", COMPARETUPLE_STR64));
790 :
791 : // invariant check
792 : #ifdef DBG
793 : GRB_TRY(GxB_BinaryOp_new(&CheckInvariant,
794 : F_BINARY(LG_MF_CheckInvariant64), GrB_BOOL, GrB_INT64, ResultTuple,
795 : "LG_MF_CheckInvariant64", CHECKINVARIANT_STR64));
796 : #endif
797 :
798 8 : GRB_TRY(GxB_UnaryOp_new(&ResidualFlow,
799 : F_UNARY(LG_MF_ResidualFlow64), GrB_FP64, ResultTuple,
800 : "LG_MF_ResidualFlow64", RESIDUALFLOW_STR64));
801 :
802 : // create ops for R*d semiring
803 :
804 8 : GRB_TRY(GxB_IndexBinaryOp_new(&RxdIndexMult,
805 : F_INDEX_BINARY(LG_MF_RxdMult64), ResultTuple, FlowEdge, GrB_INT64, GrB_BOOL,
806 : "LG_MF_RxdMult64", RXDMULT_STR64));
807 8 : GRB_TRY(GxB_BinaryOp_new_IndexOp(&RxdMult, RxdIndexMult, theta));
808 8 : GRB_TRY(GxB_BinaryOp_new(&RxdAdd,
809 : F_BINARY(LG_MF_RxdAdd64), ResultTuple, ResultTuple, ResultTuple,
810 : "LG_MF_RxdAdd64", RXDADD_STR64));
811 8 : LG_MF_resultTuple64 id = {.d = INT64_MAX, .j = -1, .residual = 0};
812 8 : GRB_TRY(GrB_Monoid_new_UDT(&RxdAddMonoid, RxdAdd, &id));
813 :
814 : // create binary op for yd
815 8 : GRB_TRY(GxB_BinaryOp_new(&CreateCompareVec,
816 : F_BINARY(LG_MF_CreateCompareVec64), CompareTuple, ResultTuple, GrB_INT64,
817 : "LG_MF_CreateCompareVec64", CREATECOMPAREVEC_STR64));
818 :
819 : // create op to prune empty tuples
820 8 : GRB_TRY(GxB_IndexUnaryOp_new(&Prune,
821 : (GxB_index_unary_function) LG_MF_Prune64, GrB_BOOL, ResultTuple, GrB_BOOL,
822 : "LG_MF_Prune64", PRUNE_STR64));
823 :
824 : // create ops for mapping
825 8 : GRB_TRY(GxB_UnaryOp_new(&ExtractJ,
826 : F_UNARY(LG_MF_ExtractJ64), GrB_INT64, CompareTuple,
827 : "LG_MF_ExtractJ64", EXTRACTJ_STR64));
828 8 : GRB_TRY(GxB_UnaryOp_new(&ExtractYJ,
829 : F_UNARY(LG_MF_ExtractYJ64), GrB_INT64, ResultTuple,
830 : "LG_MF_ExtractYJ64", EXTRACTYJ_STR64));
831 :
832 : // create ops for Map*e semiring
833 8 : GRB_TRY(GxB_IndexBinaryOp_new(&MxeIndexMult,
834 : F_INDEX_BINARY(LG_MF_MxeMult64), ResultTuple, CompareTuple, GrB_FP64, GrB_BOOL,
835 : "LG_MF_MxeMult64", MXEMULT_STR64));
836 8 : GRB_TRY(GxB_BinaryOp_new_IndexOp(&MxeMult, MxeIndexMult, theta));
837 8 : GRB_TRY(GxB_BinaryOp_new(&MxeAdd,
838 : F_BINARY(LG_MF_MxeAdd64), ResultTuple, ResultTuple, ResultTuple,
839 : "LG_MF_MxeAdd64", MXEADD_STR64));
840 8 : GRB_TRY(GrB_Monoid_new_UDT(&MxeAddMonoid, MxeAdd, &id));
841 :
842 : // update height binary op
843 8 : GRB_TRY(GxB_BinaryOp_new(&Relabel,
844 : F_BINARY(LG_MF_Relabel64), GrB_INT64, GrB_INT64, ResultTuple,
845 : "LG_MF_Relabel64", RELABEL_STR64));
846 :
847 : }else{
848 :
849 : //--------------------------------------------------------------------------
850 : // use 32-bit integers
851 : //--------------------------------------------------------------------------
852 :
853 20 : Integer_Type = GrB_INT32 ;
854 :
855 : // create types for computation
856 20 : GRB_TRY(GxB_Type_new(&ResultTuple, sizeof(LG_MF_resultTuple32),
857 : "LG_MF_resultTuple32", RESULTTUPLE_STR32));
858 20 : GRB_TRY(GxB_Type_new(&CompareTuple, sizeof(LG_MF_compareTuple32),
859 : "LG_MF_compareTuple32", COMPARETUPLE_STR32));
860 :
861 : // invariant check
862 : #ifdef DBG
863 : GRB_TRY(GxB_BinaryOp_new(&CheckInvariant,
864 : F_BINARY(LG_MF_CheckInvariant32), GrB_BOOL, GrB_INT32, ResultTuple,
865 : "LG_MF_CheckInvariant32", CHECKINVARIANT_STR32));
866 : #endif
867 :
868 20 : GRB_TRY(GxB_UnaryOp_new(&ResidualFlow,
869 : F_UNARY(LG_MF_ResidualFlow32), GrB_FP64, ResultTuple,
870 : "LG_MF_ResidualFlow32", RESIDUALFLOW_STR32));
871 :
872 : // create ops for R*d semiring
873 20 : GRB_TRY(GxB_IndexBinaryOp_new(&RxdIndexMult,
874 : F_INDEX_BINARY(LG_MF_RxdMult32), ResultTuple, FlowEdge, GrB_INT32, GrB_BOOL,
875 : "LG_MF_RxdMult32", RXDMULT_STR32));
876 20 : GRB_TRY(GxB_BinaryOp_new_IndexOp(&RxdMult, RxdIndexMult, theta));
877 20 : GRB_TRY(GxB_BinaryOp_new(&RxdAdd,
878 : F_BINARY(LG_MF_RxdAdd32), ResultTuple, ResultTuple, ResultTuple,
879 : "LG_MF_RxdAdd32", RXDADD_STR32));
880 20 : LG_MF_resultTuple32 id = {.d = INT32_MAX, .j = -1, .residual = 0};
881 20 : GRB_TRY(GrB_Monoid_new_UDT(&RxdAddMonoid, RxdAdd, &id));
882 :
883 : // create binary op for yd
884 20 : GRB_TRY(GxB_BinaryOp_new(&CreateCompareVec,
885 : F_BINARY(LG_MF_CreateCompareVec32), CompareTuple, ResultTuple, GrB_INT32,
886 : "LG_MF_CreateCompareVec32", CREATECOMPAREVEC_STR32));
887 :
888 : // create op to prune empty tuples
889 20 : GRB_TRY(GxB_IndexUnaryOp_new(&Prune,
890 : (GxB_index_unary_function) LG_MF_Prune32, GrB_BOOL, ResultTuple, GrB_BOOL,
891 : "LG_MF_Prune32", PRUNE_STR32));
892 :
893 : // create ops for mapping
894 20 : GRB_TRY(GxB_UnaryOp_new(&ExtractJ,
895 : F_UNARY(LG_MF_ExtractJ32), GrB_INT32, CompareTuple,
896 : "LG_MF_ExtractJ32", EXTRACTJ_STR32));
897 20 : GRB_TRY(GxB_UnaryOp_new(&ExtractYJ,
898 : F_UNARY(LG_MF_ExtractYJ32), GrB_INT32, ResultTuple,
899 : "LG_MF_ExtractYJ32", EXTRACTYJ_STR32));
900 :
901 : // create ops for Map*e semiring
902 20 : GRB_TRY(GxB_IndexBinaryOp_new(&MxeIndexMult,
903 : F_INDEX_BINARY(LG_MF_MxeMult32), ResultTuple, CompareTuple, GrB_FP64, GrB_BOOL,
904 : "LG_MF_MxeMult32", MXEMULT_STR32));
905 20 : GRB_TRY(GxB_BinaryOp_new_IndexOp(&MxeMult, MxeIndexMult, theta));
906 20 : GRB_TRY(GxB_BinaryOp_new(&MxeAdd,
907 : F_BINARY(LG_MF_MxeAdd32), ResultTuple, ResultTuple, ResultTuple,
908 : "LG_MF_MxeAdd32", MXEADD_STR32));
909 20 : GRB_TRY(GrB_Monoid_new_UDT(&MxeAddMonoid, MxeAdd, &id));
910 :
911 : // update height binary op
912 20 : GRB_TRY(GxB_BinaryOp_new(&Relabel,
913 : F_BINARY(LG_MF_Relabel32), GrB_INT32, GrB_INT32, ResultTuple,
914 : "LG_MF_Relabel32", RELABEL_STR32));
915 : }
916 :
917 : //----------------------------------------------------------------------------
918 : // create remaining vectors, matrices, descriptor, and semirings
919 : //----------------------------------------------------------------------------
920 :
921 28 : GRB_TRY(GrB_Matrix_new(&Map, CompareTuple, n,n));
922 28 : GRB_TRY(GrB_Vector_new(&Jvec, Integer_Type, n));
923 28 : GRB_TRY(GrB_Vector_new(&yd, CompareTuple, n));
924 28 : GRB_TRY(GrB_Vector_new(&y, ResultTuple, n));
925 :
926 28 : GRB_TRY(GrB_Semiring_new(&RxdSemiring, RxdAddMonoid, RxdMult));
927 28 : GRB_TRY(GrB_Semiring_new(&MxeSemiring, MxeAddMonoid, MxeMult));
928 :
929 : // create descriptor for building the Map and Delta matrices
930 28 : GRB_TRY(GrB_Descriptor_new(&desc));
931 28 : GRB_TRY(GrB_set(desc, GxB_USE_INDICES, GxB_ROWINDEX_LIST));
932 :
933 : // create and init d vector
934 28 : GRB_TRY(GrB_Vector_new(&d, Integer_Type, n));
935 28 : GRB_TRY(GrB_assign(d, NULL, NULL, 0, GrB_ALL, n, NULL));
936 28 : GRB_TRY(GrB_assign(d, NULL, NULL, n, &src, 1, NULL));
937 :
938 : // create R, with no flow
939 28 : GRB_TRY(GrB_Matrix_new(&R, FlowEdge, n, n));
940 : // R = ResidualForward (A)
941 28 : GRB_TRY(GrB_apply(R, NULL, NULL, ResidualForward, A, NULL));
942 : // R<!struct(A)> = ResidualBackward (AT)
943 28 : GRB_TRY(GrB_apply(R, A, NULL, ResidualBackward, AT, GrB_DESC_SC));
944 :
945 : // initial global relabeling
946 28 : LG_TRY (LG_global_relabel (R, sink, src_and_sink, GetResidual, d, &lvl, msg)) ;
947 :
948 : // create excess vector e and initial flows from the src to its neighbors
949 : // e<struct(lvl)> = A (src,:)
950 28 : GRB_TRY(GrB_Vector_new(&e, GrB_FP64, n));
951 28 : GRB_TRY(GrB_extract(e, lvl, NULL, A, GrB_ALL, n, src, GrB_DESC_ST0));
952 28 : GrB_free(&lvl);
953 : // t = MakeFlow (e), where t(i) = (0, e(i))
954 28 : GRB_TRY(GrB_Vector_new(&t, FlowEdge, n));
955 28 : GRB_TRY(GrB_apply(t, NULL, NULL, MakeFlow, e, NULL));
956 : // R(src,:) = InitForw (R (src,:), t')
957 28 : GRB_TRY(GrB_assign(R, NULL, InitForw, t, src, GrB_ALL, n, NULL));
958 : // R(:,src) = InitBack (R (:,src), t)
959 28 : GRB_TRY(GrB_assign(R, NULL, InitBack, t, GrB_ALL, n, src, NULL));
960 28 : GrB_free(&t) ;
961 :
962 : // augment the maxflow with the initial flows from the src to its neighbors
963 28 : LG_TRY (LG_augment_maxflow (f, e, sink, src_and_sink, &n_active, msg)) ;
964 :
965 : //----------------------------------------------------------------------------
966 : // compute the max flow
967 : //----------------------------------------------------------------------------
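// Each iteration is one synchronous round of push-relabel: (1) periodically
// recompute the heights with a global relabel, (2) y = R*d selects, for each
// active node, a lowest-height neighbor with positive residual, (3) the Map
// matrix re-checks each proposed push against the current heights and excesses,
// and the heights are relabeled, and (4) the surviving pushes are applied,
// updating R, the excess vector e, and the running maxflow f.  The loop ends
// when no active nodes remain.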
968 :
969 556 : for (int64_t iter = 0 ; n_active > 0 ; iter++)
970 : {
971 :
972 : //--------------------------------------------------------------------------
973 : // Part 1: global relabeling
974 : //--------------------------------------------------------------------------
975 :
976 528 : if ((iter > 0) && (flow_mtx != NULL) && (iter % 12 == 0))
977 : {
978 14 : LG_TRY (LG_global_relabel (R, sink, src_and_sink, GetResidual, d, &lvl, msg)) ;
979 : // delete nodes in e that cannot be reached from the sink
980 : // e<!struct(lvl)> = empty scalar
981 14 : GrB_assign (e, lvl, NULL, empty, GrB_ALL, n, GrB_DESC_SC) ;
982 14 : GrB_free(&lvl);
983 14 : GRB_TRY(GrB_Vector_nvals(&n_active, e));
984 14 : if(n_active == 0) break;
985 : }
986 :
987 : //--------------------------------------------------------------------------
988 : // Part 2: deciding where to push
989 : //--------------------------------------------------------------------------
990 :
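// For each active node i (an entry in e), the RxdSemiring scans row R(i,:) and
// keeps the neighbor j with positive residual and the smallest height d(j);
// y(i) records that residual, d(j), and j.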
991 : // y<struct(e),replace> = R*d using the RxdSemiring
992 528 : GRB_TRY(GrB_mxv(y, e, NULL, RxdSemiring, R, d, GrB_DESC_RS));
993 :
994 : // remove empty tuples (0,inf,-1) from y
995 528 : GRB_TRY(GrB_select(y, NULL, NULL, Prune, y, 0, NULL));
996 :
997 : //--------------------------------------------------------------------------
998 : // Part 3: verifying the pushes
999 : //--------------------------------------------------------------------------
1000 :
1001 : // create Map matrix from pattern and values of yd
1002 : // yd = CreateCompareVec (y,d) using eWiseMult
1003 528 : GRB_TRY(GrB_eWiseMult(yd, NULL, NULL, CreateCompareVec, y, d, NULL));
1004 : // Jvec = ExtractJ (yd), where Jvec(i) = yd(i)->j
1005 528 : GRB_TRY(GrB_apply(Jvec, NULL, NULL, ExtractJ, yd, NULL));
1006 528 : GRB_TRY(GrB_Matrix_clear(Map));
1007 528 : GRB_TRY(GrB_Matrix_build(Map, yd, Jvec, yd, GxB_IGNORE_DUP, desc));
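// (With GxB_USE_INDICES, the row indices of Map are taken from the positions
// of the entries of yd, so Map(i, Jvec(i)) = yd(i): one entry per proposed
// push, from node i to its chosen target j.)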
1008 :
1009 : // make e dense for Map computation
1010 : // TODO: consider keeping e in bitmap/full format only,
1011 : // or always full with e(i)=0 denoting a non-active node.
1012 528 : GRB_TRY(GrB_assign(e, e, NULL, 0, GrB_ALL, n, GrB_DESC_SC));
1013 :
1014 : // y = Map*e using the MxeSemiring
1015 528 : GRB_TRY(GrB_mxv(y, NULL, NULL, MxeSemiring, Map, e, NULL));
1016 :
1017 : // remove empty tuples (0,inf,-1) from y
1018 528 : GRB_TRY(GrB_select(y, NULL, NULL, Prune, y, -1, NULL));
1019 :
1020 : // relabel, updating the height/label vector d
1021 : // d<struct(y)> = Relabel (d, y) using eWiseMult
1022 528 : GRB_TRY(GrB_eWiseMult(d, y, NULL, Relabel, d, y, GrB_DESC_S));
1023 :
1024 : #ifdef DBG
1025 : // assert invariant for all labels
1026 : GRB_TRY(GrB_eWiseMult(invariant, y, NULL, CheckInvariant, d, y, GrB_DESC_RS));
1027 : GRB_TRY(GrB_reduce(check, NULL, GrB_LAND_MONOID_BOOL, invariant, NULL));
1028 : GRB_TRY(GrB_Scalar_extractElement(&check_raw, check));
1029 : ASSERT(check_raw == true);
1030 : #endif
1031 :
1032 : //--------------------------------------------------------------------------
1033 : // Part 4: executing the pushes
1034 : //--------------------------------------------------------------------------
1035 :
1036 : // extract residual flows from y
1037 : // delta_vec = ResidualFlow (y), obtaining just the residual flows
1038 528 : GRB_TRY(GrB_apply(delta_vec, NULL, NULL, ResidualFlow, y, NULL));
1039 :
1040 : // delta_vec = min (delta_vec, e), where e is dense
1041 528 : GRB_TRY(GrB_eWiseMult(delta_vec, NULL, NULL, GrB_MIN_FP64, delta_vec, e, NULL));
1042 :
1043 : // create the Delta matrix from delta_vec and y
1044 : // note that delta_vec has the same structure as y
1045 : // Jvec = ExtractYJ (y), where Jvec(i) = y(i)->j
1046 528 : GRB_TRY(GrB_apply(Jvec, NULL, NULL, ExtractYJ, y, NULL));
1047 528 : GRB_TRY(GrB_Matrix_clear(Delta));
1048 528 : GRB_TRY(GrB_Matrix_build(Delta, delta_vec, Jvec, delta_vec, GxB_IGNORE_DUP, desc));
1049 :
1050 : // make Delta anti-symmetric
1051 : // Delta = (Delta - Delta')
1052 528 : GRB_TRY(GxB_eWiseUnion(Delta, NULL, NULL, GrB_MINUS_FP64, Delta, zero, Delta, zero, GrB_DESC_T1));
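// (Now Delta(i,j) = +delta and Delta(j,i) = -delta for a push of delta from i
// to j, so a single pass below adds delta to the flow on edge (i,j) and
// subtracts it from the flow on the reverse edge (j,i).)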
1053 :
1054 : // update R
1055 : // R<Delta> = UpdateFlow (R, Delta) using eWiseMult
1056 528 : GRB_TRY(GrB_eWiseMult(R, Delta, NULL, UpdateFlow, R, Delta, GrB_DESC_S));
1057 :
1058 : // reduce Delta to delta_vec
1059 : // delta_vec(j) = sum (Delta (:,j)): the net change in excess at each node j
1060 528 : GRB_TRY(GrB_reduce(delta_vec, NULL, NULL, GrB_PLUS_FP64, Delta, GrB_DESC_T0));
1061 :
1062 : // add delta_vec to e
1063 : // e<struct(delta_vec)> += delta_vec
1064 528 : GRB_TRY(GrB_assign(e, delta_vec, GrB_PLUS_FP64, delta_vec, GrB_ALL, n, GrB_DESC_S));
1065 :
1066 : // augment maxflow for all active nodes
1067 528 : LG_TRY (LG_augment_maxflow (f, e, sink, src_and_sink, &n_active, msg)) ;
1068 : }
1069 :
1070 : //----------------------------------------------------------------------------
1071 : // optionally construct the output flow matrix, if requested
1072 : //----------------------------------------------------------------------------
1073 :
1074 28 : if (flow_mtx != NULL)
1075 : {
1076 : // flow_mtx = ExtractMatrixFlow (R)
1077 14 : GRB_TRY(GrB_Matrix_new(flow_mtx, GrB_FP64, n, n));
1078 14 : GRB_TRY(GrB_apply(*flow_mtx, NULL, NULL, ExtractMatrixFlow, R, NULL));
1079 : // delete any zero or negative flows from the flow_mtx
1080 14 : GRB_TRY(GrB_select(*flow_mtx, NULL, NULL, GrB_VALUEGT_FP64, *flow_mtx, 0, NULL));
1081 : }
1082 :
1083 : //----------------------------------------------------------------------------
1084 : // for test coverage only
1085 : //----------------------------------------------------------------------------
1086 :
1087 : #ifdef COVERAGE
1088 : // The MxeAdd operator is not tested via the call to GrB_mxv with the
1089 : // MxeSemiring above, so test it via the MxeAddMonoid.
1090 28 : GrB_free(&y);
1091 28 : GRB_TRY(GrB_Vector_new(&y, ResultTuple, 3));
1092 28 : if (n > NBIG)
1093 : {
1094 8 : LG_MF_resultTuple64 a = {.d = 1, .j = 2, .residual = 3};
1095 8 : LG_MF_resultTuple64 b = {.d = 4, .j = 5, .residual = 6};
1096 8 : GRB_TRY (GrB_Vector_setElement_UDT (y, (void *) &a, 0)) ;
1097 8 : GRB_TRY (GrB_Vector_setElement_UDT (y, (void *) &b, 0)) ;
1098 8 : LG_MF_resultTuple64 c = {.d = 0, .j = 0, .residual = 0};
1099 8 : GRB_TRY (GrB_Vector_reduce_UDT ((void *) &c, NULL, MxeAddMonoid, y, NULL)) ;
1100 8 : LG_ASSERT ((c.residual == 6 && c.j == 5 && c.d == 4), GrB_PANIC) ;
1101 : }
1102 : else
1103 : {
1104 20 : LG_MF_resultTuple32 a = {.d = 1, .j = 2, .residual = 3};
1105 20 : LG_MF_resultTuple32 b = {.d = 4, .j = 5, .residual = 6};
1106 20 : GRB_TRY (GrB_Vector_setElement_UDT (y, (void *) &a, 0)) ;
1107 20 : GRB_TRY (GrB_Vector_setElement_UDT (y, (void *) &b, 0)) ;
1108 20 : LG_MF_resultTuple32 c = {.d = 0, .j = 0, .residual = 0};
1109 20 : GRB_TRY (GrB_Vector_reduce_UDT ((void *) &c, NULL, MxeAddMonoid, y, NULL)) ;
1110 20 : LG_ASSERT ((c.residual == 6 && c.j == 5 && c.d == 4), GrB_PANIC) ;
1111 : }
1112 : #endif
1113 :
1114 : //----------------------------------------------------------------------------
1115 : // free workspace and return result
1116 : //----------------------------------------------------------------------------
1117 :
1118 28 : LG_FREE_WORK ;
1119 28 : return GrB_SUCCESS;
1120 : #else
1121 : return GrB_NOT_IMPLEMENTED ;
1122 : #endif
1123 : }
|