// Copyright (C) 2004-2006 The Trustees of Indiana University.
// Copyright (C) 2002 Brad King and Douglas Gregor

// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

//  Authors: Douglas Gregor
//           Andrew Lumsdaine
//           Brian Barrett
#ifndef BOOST_PARALLEL_GRAPH_PAGE_RANK_HPP
#define BOOST_PARALLEL_GRAPH_PAGE_RANK_HPP

#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif

#include <boost/assert.hpp>
#include <boost/graph/overloading.hpp>
#include <boost/graph/page_rank.hpp>
#include <boost/graph/distributed/concepts.hpp>
#include <boost/property_map/parallel/distributed_property_map.hpp>
#include <boost/property_map/parallel/caching_property_map.hpp>
#include <boost/graph/parallel/algorithm.hpp>
#include <boost/graph/parallel/container_traits.hpp>
#include <boost/graph/iteration_macros.hpp> // BGL_FORALL_VERTICES_T, BGL_FORALL_ADJ_T

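// When defined, the code below bypasses the distributed property-map
// reductions and pushes ranks with MPI-2 one-sided operations
// (MPI_Win_create / MPI_Accumulate / MPI_Win_fence) instead.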
// #define WANT_MPI_ONESIDED 1

namespace boost { namespace graph { namespace distributed {

namespace detail {
#ifdef WANT_MPI_ONESIDED
  template<typename Graph, typename RankMap, typename owner_map_t>
  void page_rank_step(const Graph& g, RankMap from_rank, MPI_Win to_win,
                      typename property_traits<RankMap>::value_type damping,
                      owner_map_t owner)
  {
    typedef typename property_traits<RankMap>::value_type rank_type;
    int me, ret;
    MPI_Comm_rank(MPI_COMM_WORLD, &me);

    // MPI_Accumulate is not required to store the value of the data
    // being sent, only the address.  The value of the memory location
    // must not change until the end of the access epoch, meaning the
    // call to MPI_Win_fence.  We therefore store the updated value back
    // into the from_rank map before the accumulate rather than using
    // a temporary.  We're going to reset the values in from_rank
    // before the end of page_rank_step() anyway, so this isn't a huge
    // deal.  But MPI-2 One-sided is an abomination.
    BGL_FORALL_VERTICES_T(u, g, Graph) {
      put(from_rank, u, (damping * get(from_rank, u) / out_degree(u, g)));
      BGL_FORALL_ADJ_T(u, v, g, Graph) {
        ret = MPI_Accumulate(&(from_rank[u]),
                             1, MPI_DOUBLE,
                             get(owner, v), local(v),
                             1, MPI_DOUBLE, MPI_SUM, to_win);
        BOOST_ASSERT(MPI_SUCCESS == ret);
      }
    }
    MPI_Win_fence(0, to_win);

    // Reset the rank values for the next iteration.  Do this now to get
    // around the stupid synchronization rules of MPI-2 One-sided.
    BGL_FORALL_VERTICES_T(v, g, Graph) put(from_rank, v, rank_type(1 - damping));
  }
#endif

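  // Reducer for combining rank contributions that arrive from other
  // processes: the unary call supplies the default value (zero) for an
  // absent key, and the ternary call merges a local value x with an
  // incoming value y by summing.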
  template<typename T>
  struct rank_accumulate_reducer {
    BOOST_STATIC_CONSTANT(bool, non_default_resolver = true);

    template<typename K>
    T operator()(const K&) const { return T(0); }

    template<typename K>
    T operator()(const K&, const T& x, const T& y) const { return x + y; }
  };
} // end namespace detail

template<typename Graph, typename RankMap, typename Done, typename RankMap2>
void
page_rank_impl(const Graph& g, RankMap rank_map, Done done,
               typename property_traits<RankMap>::value_type damping,
               typename graph_traits<Graph>::vertices_size_type n,
               RankMap2 rank_map2)
{
  typedef typename property_traits<RankMap>::value_type rank_type;

  int me;
  MPI_Comm_rank(MPI_COMM_WORLD, &me);

  typename property_map<Graph, vertex_owner_t>::const_type
    owner = get(vertex_owner, g);
  (void)owner; // silence an unused-variable warning when WANT_MPI_ONESIDED is not defined

  typedef typename boost::graph::parallel::process_group_type<Graph>
    ::type process_group_type;
  typedef typename process_group_type::process_id_type process_id_type;

  process_group_type pg = process_group(g);
  process_id_type id = process_id(pg);

  BOOST_ASSERT(me == id);

  rank_type initial_rank = rank_type(rank_type(1) / n);
  BGL_FORALL_VERTICES_T(v, g, Graph) put(rank_map, v, initial_rank);

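  // Each iteration computes, for every vertex v,
  //   rank'(v) = (1 - damping) + damping * sum over edges (u,v) of
  //              rank(u) / out_degree(u),
  // ping-ponging between rank_map and rank_map2 as source and target.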
#ifdef WANT_MPI_ONESIDED

  BOOST_ASSERT(sizeof(rank_type) == sizeof(double));

  bool to_map_2 = true;
  MPI_Win win, win2;

  MPI_Win_create(&(rank_map[*(vertices(g).first)]),
                 sizeof(double) * num_vertices(g),
                 sizeof(double),
                 MPI_INFO_NULL, MPI_COMM_WORLD, &win);
  MPI_Win_set_name(win, "rank_map_win");
  MPI_Win_create(&(rank_map2[*(vertices(g).first)]),
                 sizeof(double) * num_vertices(g),
                 sizeof(double),
                 MPI_INFO_NULL, MPI_COMM_WORLD, &win2);
  MPI_Win_set_name(win2, "rank_map2_win");

  // set initial rank maps for the first iteration...
  BGL_FORALL_VERTICES_T(v, g, Graph) put(rank_map2, v, rank_type(1 - damping));

  MPI_Win_fence(0, win);
  MPI_Win_fence(0, win2);

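  // Ping-pong between the two maps: read ranks from one map and
  // accumulate the new ranks into the window backing the other, then
  // swap roles on the next iteration.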
  while ((to_map_2 && !done(rank_map, g)) ||
         (!to_map_2 && !done(rank_map2, g))) {
    if (to_map_2) {
      graph::distributed::detail::page_rank_step(g, rank_map, win2, damping, owner);
      to_map_2 = false;
    } else {
      graph::distributed::detail::page_rank_step(g, rank_map2, win, damping, owner);
      to_map_2 = true;
    }
  }
  synchronize(boost::graph::parallel::process_group(g));

  MPI_Win_free(&win);
  MPI_Win_free(&win2);

#else
  // Rank contributions sent to remote vertices accumulate after each
  // step: missing entries default to zero and incoming values are
  // summed (see detail::rank_accumulate_reducer above).
  rank_map.set_reduce(detail::rank_accumulate_reducer<rank_type>());
  rank_map2.set_reduce(detail::rank_accumulate_reducer<rank_type>());
  rank_map.set_consistency_model(boost::parallel::cm_flush | boost::parallel::cm_reset);
  rank_map2.set_consistency_model(boost::parallel::cm_flush | boost::parallel::cm_reset);
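  // (Roughly: cm_flush sends locally cached updates to the owning
  // process at each synchronize(), and cm_reset then restores cached
  // ghost values to the reducer's default -- zero here -- so every
  // superstep starts from a clean slate.)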

  bool to_map_2 = true;
  while ((to_map_2 && !done(rank_map, g)) ||
         (!to_map_2 && !done(rank_map2, g))) {
    /**
     * PageRank can be implemented slightly more efficiently on a
     * bidirectional graph than on an incidence graph. However,
     * distributed PageRank requires that we have the rank of the
     * source vertex available locally, so we force the incidence
     * graph implementation, which pushes rank from source to
     * target.
     */
    typedef incidence_graph_tag category;
    if (to_map_2) {
      graph::detail::page_rank_step(g, rank_map, rank_map2, damping,
                                    category());
      to_map_2 = false;
    } else {
      graph::detail::page_rank_step(g, rank_map2, rank_map, damping,
                                    category());
      to_map_2 = true;
    }
    using boost::graph::parallel::process_group;
    synchronize(process_group(g));
  }

  rank_map.reset();
#endif

  if (!to_map_2)
    BGL_FORALL_VERTICES_T(v, g, Graph) put(rank_map, v, get(rank_map2, v));
}

template<typename Graph, typename RankMap, typename Done, typename RankMap2>
void
page_rank(const Graph& g, RankMap rank_map, Done done,
          typename property_traits<RankMap>::value_type damping,
          typename graph_traits<Graph>::vertices_size_type n,
          RankMap2 rank_map2
          BOOST_GRAPH_ENABLE_IF_MODELS_PARM(Graph, distributed_graph_tag))
{
  (page_rank_impl)(g, rank_map, done, damping, n, rank_map2);
}

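// A minimal calling sketch (hypothetical setup: assumes a distributed
// adjacency_list whose vertices carry a double-valued vertex_rank
// property, and a second distributed map rank_map2 for the ping-pong
// buffer; both maps must support set_reduce/set_consistency_model as
// used above):
//
//   Graph g(edge_list.begin(), edge_list.end(), n);  // n = global vertex count
//   page_rank(g, get(boost::vertex_rank, g),
//             boost::graph::n_iterations(20),   // Done functor from page_rank.hpp
//             0.85,                             // damping
//             n, rank_map2);
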
template<typename MutableGraph>
void
remove_dangling_links(MutableGraph& g
                      BOOST_GRAPH_ENABLE_IF_MODELS_PARM(MutableGraph,
                                                        distributed_graph_tag))
{
  typename graph_traits<MutableGraph>::vertices_size_type old_n;
  do {
    old_n = num_vertices(g);

    typename graph_traits<MutableGraph>::vertex_iterator vi, vi_end;
    for (boost::tie(vi, vi_end) = vertices(g); vi != vi_end; /* in loop */) {
      typename graph_traits<MutableGraph>::vertex_descriptor v = *vi++;
      if (out_degree(v, g) == 0) {
        clear_vertex(v, g);
        remove_vertex(v, g);
      }
    }
  } while (num_vertices(g) < old_n);
}
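// (The do/while runs to a fixed point because removing a dangling
// vertex can leave its predecessors dangling in turn: in a chain
// a -> b, deleting the dangling b makes a dangling on the next pass.)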

} // end namespace distributed

using distributed::page_rank;
using distributed::remove_dangling_links;

} } // end namespace boost::graph

#endif // BOOST_PARALLEL_GRAPH_PAGE_RANK_HPP