LCOV - code coverage report
Current view: top level - experimental/algorithm - LAGraph_dnn.c (source / functions)
Test: LAGraph code coverage report
Commit id: 3b461aa
Current time (UTC): 2024-01-25T16:04:32Z
Date: 2024-01-25 16:05:28
Coverage summary:  Lines: 16 of 16 hit (100.0 %)  |  Functions: 1 of 1 hit (100.0 %)

Source code
//------------------------------------------------------------------------------
// LAGraph_dnn: sparse deep neural network
//------------------------------------------------------------------------------

// LAGraph, (c) 2019-2022 by The LAGraph Contributors, All Rights Reserved.
// SPDX-License-Identifier: BSD-2-Clause
//
// For additional details (including references to third party source code and
// other files) see the LICENSE file or contact permission@sei.cmu.edu. See
// Contributors.txt for a full list of contributors. Created, in part, with
// funding and support from the U.S. Government (see Acknowledgments.txt file).
// DM22-0790

// Contributed by Timothy A. Davis, Texas A&M University

//------------------------------------------------------------------------------

// LAGraph_dnn: sparse deep neural network.
// Based on inferenceReLUvec.m by Jeremy Kepner, MIT.

// Performs ReLU inference using input feature vectors Y0.

// See http://graphchallenge.org/ for a description of the algorithm.
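
// Each pass of the layer loop below computes, for the entries present in Y:
// Y = Y*W [layer] + bias, then deletes entries <= 0 and clips the remaining
// values to a maximum of 32 (a clipped ReLU), where bias(j) denotes the
// diagonal entry Bias [layer] (j,j).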

// On input, Y0 holds the initial feature vectors, of size nfeatures-by-nneurons.
// This format uses the graph convention that A(i,j) is the edge (i,j).
// Each row of Y0 is a single feature.

// W is an array of size nlayers of sparse matrices.  Each W[layer] matrix has
// the same size: nneurons-by-nneurons.  W[layer] represents the DNN weights
// for that layer.

// The Bias[layer] matrices are diagonal, and the same size as W[layer].

// All matrices should have type GrB_FP32; the method will be very slow
// otherwise.

// On output, Y is the computed result, of the same size as Y0.

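// A minimal calling sequence (an illustrative sketch only, not part of this
// method; NLAYERS, nneurons, and the per-neuron bias array b are placeholder
// names, and loading W and Y0 from disk is omitted):
//
//      GrB_Matrix W [NLAYERS], Bias [NLAYERS], Y0, Y ;
//      // ... create each W [layer] and Y0 as GrB_FP32 matrices ...
//
//      // build each diagonal Bias [layer] from per-neuron bias values b [layer][j]
//      for (int layer = 0 ; layer < NLAYERS ; layer++)
//      {
//          GrB_Matrix_new (&Bias [layer], GrB_FP32, nneurons, nneurons) ;
//          for (GrB_Index j = 0 ; j < nneurons ; j++)
//          {
//              GrB_Matrix_setElement_FP32 (Bias [layer], b [layer][j], j, j) ;
//          }
//      }
//
//      GrB_Info result = LAGraph_dnn (&Y, W, Bias, NLAYERS, Y0) ;
//      // on success, Y is nfeatures-by-nneurons; the caller frees it:
//      GrB_free (&Y) ;
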
#define LG_FREE_ALL         \
{                           \
    GrB_free (&Y) ;         \
}

#include "LG_internal.h"
#include "LAGraphX.h"

//****************************************************************************
GrB_Info LAGraph_dnn    // returns GrB_SUCCESS if successful
(
    // output
    GrB_Matrix *Yhandle,    // Y, created on output
    // input: not modified
    GrB_Matrix *W,      // W [0..nlayers-1], each nneurons-by-nneurons
    GrB_Matrix *Bias,   // Bias [0..nlayers-1], diagonal nneurons-by-nneurons
    int nlayers,        // # of layers
    GrB_Matrix Y0       // input features: nfeatures-by-nneurons
)
{
    GrB_Info info ;
    char *msg = NULL ;

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    if (Yhandle == NULL || W == NULL || Bias == NULL || Y0 == NULL)
    {
        return (GrB_NULL_POINTER) ;
    }

    //--------------------------------------------------------------------------
    // create the output matrix Y
    //--------------------------------------------------------------------------

    GrB_Matrix Y = NULL ;
    (*Yhandle) = NULL ;
    GrB_Index nfeatures, nneurons ;
    GRB_TRY (GrB_Matrix_nrows (&nfeatures, Y0)) ;
    GRB_TRY (GrB_Matrix_ncols (&nneurons,  Y0)) ;
    GRB_TRY (GrB_Matrix_new (&Y, GrB_FP32, nfeatures, nneurons)) ;

    //--------------------------------------------------------------------------
    // propagate the features through the neuron layers
    //--------------------------------------------------------------------------

    for (int layer = 0 ; layer < nlayers ; layer++)
    {
        // Y = Y * W [layer], using the conventional PLUS_TIMES semiring
        GRB_TRY (GrB_mxm (Y, NULL, NULL, GrB_PLUS_TIMES_SEMIRING_FP32,
            ((layer == 0) ? Y0 : Y), W [layer], NULL)) ;

        // Y = Y * Bias [layer], using the MIN_PLUS semiring.  This computes
        // Y(i,j) += Bias [layer] (j,j) for each entry Y(i,j).  It does not
        // introduce any new entries in Y.  The MIN monoid is not actually used
        // since Bias [layer] is a diagonal matrix.  The prior version used
        // a PLUS_PLUS semiring, which also works but is not a GrB built-in.
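        // (With MIN_PLUS, Y(i,j) = min over k of Y(i,k) + Bias [layer] (k,j);
        // the only entry in column j of the diagonal Bias [layer] is (j,j),
        // so the min reduces to the single term Y(i,j) + Bias [layer] (j,j).)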
        GRB_TRY (GrB_mxm (Y, NULL, NULL, GrB_MIN_PLUS_SEMIRING_FP32, Y,
            Bias [layer], NULL)) ;

        // delete entries from Y: keep only those entries greater than zero
        GRB_TRY (GrB_select (Y, NULL, NULL, GrB_VALUEGT_FP32, Y, (float) 0,
            NULL)) ;

        // threshold maximum values: Y = min (Y, 32)
        GRB_TRY (GrB_apply (Y, NULL, NULL, GrB_MIN_FP32, Y, (float) 32, NULL)) ;
    }

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    (*Yhandle) = Y ;
    return (GrB_SUCCESS) ;
}

Generated by: LCOV version 1.14