Back to home page

EIC code displayed by LXR

 
 

    


File indexing completed on 2025-01-18 09:50:15

0001 // Copyright (C) 2004-2008 The Trustees of Indiana University.
0002 
0003 // Use, modification and distribution is subject to the Boost Software
0004 // License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
0005 // http://www.boost.org/LICENSE_1_0.txt)
0006 
0007 //  Authors: Douglas Gregor
0008 //           Nick Edmonds
0009 //           Andrew Lumsdaine
0010 
0011 // The placement of this #include probably looks very odd relative to
0012 // the #ifndef/#define pair below. However, this placement is
0013 // extremely important to allow the various property map headers to be
0014 // included in any order.
0015 #include <boost/property_map/property_map.hpp>
0016 
0017 #ifndef BOOST_PARALLEL_DISTRIBUTED_PROPERTY_MAP_HPP
0018 #define BOOST_PARALLEL_DISTRIBUTED_PROPERTY_MAP_HPP
0019 
0020 #include <boost/assert.hpp>
0021 #include <boost/type_traits/is_base_and_derived.hpp>
0022 #include <boost/shared_ptr.hpp>
0023 #include <boost/weak_ptr.hpp>
0024 #include <boost/optional.hpp>
0025 #include <boost/property_map/parallel/process_group.hpp>
0026 #include <boost/function/function1.hpp>
0027 #include <vector>
0028 #include <set>
0029 #include <boost/property_map/parallel/basic_reduce.hpp>
0030 #include <boost/property_map/parallel/detail/untracked_pair.hpp>
0031 #include <boost/type_traits/is_same.hpp>
0032 #include <boost/property_map/parallel/local_property_map.hpp>
0033 #include <map>
0034 #include <boost/version.hpp>
0035 #include <boost/property_map/parallel/unsafe_serialize.hpp>
0036 #include <boost/multi_index_container.hpp>
0037 #include <boost/multi_index/hashed_index.hpp>
0038 #include <boost/multi_index/member.hpp>
0039 #include <boost/multi_index/sequenced_index.hpp>
0040 
0041 // Serialization functions for constructs we use
0042 #include <boost/serialization/utility.hpp>
0043 
0044 namespace boost { namespace parallel {
0045 
0046 namespace detail {
0047   /**************************************************************************
0048    * Metafunction that degrades an Lvalue Property Map category tag to
0049    * a Read Write Property Map category tag.
0050    **************************************************************************/
0051   template<bool IsLvaluePropertyMap>
0052   struct make_nonlvalue_property_map
0053   {
0054     template<typename T> struct apply { typedef T type; };
0055   };
0056 
0057   template<>
0058   struct make_nonlvalue_property_map<true>
0059   {
0060     template<typename>
0061     struct apply
0062     {
0063       typedef read_write_property_map_tag type;
0064     };
0065   };
0066 
0067   /**************************************************************************
0068    * Performs a "put" on a property map so long as the property map is
0069    * a Writable Property Map or a mutable Lvalue Property Map. This
0070    * is required because the distributed property map's message
0071    * handler handles "put" messages even for a const property map,
0072    * although receipt of a "put" message is ill-formed.
0073    **************************************************************************/
0074   template<bool IsLvaluePropertyMap>
0075   struct maybe_put_in_lvalue_pm
0076   {
0077     template<typename PropertyMap, typename Key, typename Value>
0078     static inline void
0079     do_put(PropertyMap, const Key&, const Value&)
0080     { BOOST_ASSERT(false); }
0081   };
0082 
0083   template<>
0084   struct maybe_put_in_lvalue_pm<true>
0085   {
0086     template<typename PropertyMap, typename Key, typename Value>
0087     static inline void
0088     do_put(PropertyMap pm, const Key& key, const Value& value)
0089     {
0090       using boost::put;
0091 
0092       put(pm, key, value);
0093     }
0094   };
0095 
0096   template<typename PropertyMap, typename Key, typename Value>
0097   inline void
0098   maybe_put_impl(PropertyMap pm, const Key& key, const Value& value,
0099                  writable_property_map_tag)
0100   {
0101     using boost::put;
0102 
0103     put(pm, key, value);
0104   }
0105 
0106   template<typename PropertyMap, typename Key, typename Value>
0107   inline void
0108   maybe_put_impl(PropertyMap pm, const Key& key, const Value& value,
0109                  lvalue_property_map_tag)
0110   {
0111     typedef typename property_traits<PropertyMap>::value_type value_type;
0112     typedef typename property_traits<PropertyMap>::reference reference;
0113     // DPG TBD: Some property maps are improperly characterized as
0114     // lvalue_property_maps, when in fact they do not provide true
0115     // references. The most typical example is those property maps
0116     // built from vector<bool> and its iterators, which deal with
0117     // proxies. We don't want to mischaracterize these as not having a
0118     // "put" operation, so we only consider an lvalue_property_map as
0119     // constant if its reference is const value_type&. In fact, this
0120     // isn't even quite correct (think of a
0121     // vector<bool>::const_iterator), but at present C++ doesn't
0122     // provide us with any alternatives.
0123     typedef is_same<const value_type&, reference> is_constant;
0124 
0125     maybe_put_in_lvalue_pm<(!is_constant::value)>::do_put(pm, key, value);
0126   }
0127 
  // Fallback for property maps that are neither Writable nor Lvalue:
  // a "put" can never be satisfied, so reaching this overload is a
  // logic error (a put message arrived for a read-only map).
  template<typename PropertyMap, typename Key, typename Value>
  inline void
  maybe_put_impl(PropertyMap, const Key&, const Value&, ...)
  { BOOST_ASSERT(false); }
0132 
0133   template<typename PropertyMap, typename Key, typename Value>
0134   inline void
0135   maybe_put(PropertyMap pm, const Key& key, const Value& value)
0136   {
0137     maybe_put_impl(pm, key, value,
0138                    typename property_traits<PropertyMap>::category());
0139   }
0140 } // end namespace detail
0141 
/** The consistency model used by the distributed property map.
 *  Values are bit flags that may be combined with bitwise OR. Only
 *  @c cm_forward is visibly consulted in this header (a remote @c put
 *  sends a message to the owner only when it is set); the remaining
 *  flags are interpreted during synchronization -- see the .ipp
 *  implementation.
 */
enum consistency_model {
  cm_forward = 1 << 0,       // propagate remote put()s to the owning process
  cm_backward = 1 << 1,      // presumably: pull owners' values back into ghost cells -- confirm in .ipp
  cm_bidirectional = cm_forward | cm_backward,
  cm_flush = 1 << 2,         // flush pending remote writes at synchronization -- TODO confirm
  cm_reset = 1 << 3,         // reset ghost cells to their default values -- TODO confirm
  cm_clear = 1 << 4          // clear out all ghost cells -- TODO confirm
};
0151 
0152 /** Distributed property map adaptor.
0153  *
0154  *  The distributed property map adaptor is a property map whose
0155  *  stored values are distributed across multiple non-overlapping
0156  *  memory spaces on different processes. Values local to the current
0157  *  process are stored within a local property map and may be
0158  *  immediately accessed via @c get and @c put. Values stored on
0159  *  remote processes may also be accessed via @c get and @c put, but the
0160  *  behavior differs slightly:
0161  *
0162  *  - @c put operations update a local ghost cell and send a "put"
0163  *    message to the process that owns the value. The owner is free to
0164  *    update its own "official" value or may ignore the put request.
0165  *
0166  *  - @c get operations return the contents of the local ghost
0167  *    cell. If no ghost cell is available, one is created using the
0168  *    default value provided by the "reduce" operation. See, e.g.,
0169  *    @ref basic_reduce and @ref property_reduce.
0170  *
0171  * Using distributed property maps requires a bit more care than using
0172  * local, sequential property maps. While the syntax and semantics are
0173  * similar, distributed property maps may contain out-of-date
0174  * information that can only be guaranteed to be synchronized by
0175  * calling the @ref synchronize function in all processes.
0176  *
0177  * To address the issue of out-of-date values, distributed property
0178  * maps are supplied with a reduction operation. The reduction
0179  * operation has two roles:
0180  *
0181  *   -# When a value is needed for a remote key but no value is
0182  *      immediately available, the reduction operation provides a
0183  *      suitable default. For instance, a distributed property map
0184  *      storing distances may have a reduction operation that returns
0185  *      an infinite value as the default, whereas a distributed
0186  *      property map for vertex colors may return white as the
0187  *      default.
0188  *
0189  *   -# When a value is received from a remote process, the process
0190  *      owning the key associated with that value must determine which
0191  *      value---the locally stored value, the value received from a
0192  *      remote process, or some combination of the two---will be
0193  *      stored as the "official" value in the property map. The
0194  *      reduction operation transforms the local and remote values
0195  *      into the "official" value to be stored.
0196  *
0197  * @tparam ProcessGroup the type of the process group over which the
0198  * property map is distributed and is also the medium for
0199  * communication.
0200  *
0201  * @tparam StorageMap the type of the property map that will
0202  * store values for keys local to this processor. The @c value_type of
0203  * this property map will become the @c value_type of the distributed
0204  * property map. The distributed property map models the same property
0205  * map concepts as the @c LocalPropertyMap, with one exception: a
0206  * distributed property map cannot be an LvaluePropertyMap (because
0207  * remote values are not addressable), and is therefore limited to
0208  * ReadWritePropertyMap.
0209  */
template<typename ProcessGroup, typename GlobalMap, typename StorageMap>
class distributed_property_map
{
 public:
  /// The key type of the property map.
  typedef typename property_traits<GlobalMap>::key_type key_type;

  /// The value type of the property map.
  typedef typename property_traits<StorageMap>::value_type value_type;
  typedef typename property_traits<StorageMap>::reference  reference;
  typedef ProcessGroup                        process_group_type;

 private:
  typedef distributed_property_map            self_type;
  typedef typename property_traits<StorageMap>::category local_category;
  typedef typename property_traits<StorageMap>::key_type local_key_type;
  /// The (owner process, local key) pair produced by the global map.
  typedef typename property_traits<GlobalMap>::value_type owner_local_pair;
  typedef typename ProcessGroup::process_id_type process_id_type;

  /// Message tags exchanged between processes by this property map.
  enum property_map_messages {
    /** A request to store a value in a property map. The message
     * contains a std::pair<key, data>.
     */
    property_map_put,

    /** A request to retrieve a particular value in a property
     *  map. The message contains a key. The owner of that key will
     *  reply with a value.
     */
    property_map_get,

    /** A request to update values stored on a remote processor. The
     * message contains a vector of keys for which the source
     * requests updated values. This message will only be transmitted
     * during synchronization.
     */
    property_map_multiget,

    /** A request to store values in a ghost cell. This message
     * contains a vector of key/value pairs corresponding to the
     * sequence of keys sent to the source processor.
     */
    property_map_multiget_reply,

    /** The payload containing a vector of local key-value pairs to be
     * put into the remote property map. A key-value std::pair will be
     * used to store each local key-value pair.
     */
    property_map_multiput
  };

  // Code from Joaquín M López Muñoz to work around unusual implementation of
  // std::pair in VC++ 10: extracts the first element of a pair, for use
  // as the key extractor of the hashed index over the ghost cells.
  template<typename First,typename Second>
  class pair_first_extractor {
    typedef std::pair<First,Second> value_type;

    public:
    typedef First result_type;
    const result_type& operator()(const value_type& x) const {
      return x.first;
    }

    result_type& operator()(value_type& x) const {
      return x.first;
    }
  };

 public:
  /// The type of the ghost cells: (key, value) pairs indexed both as
  /// a sequence (index 0, used when pruning older cells) and as a
  /// hash table unique on the key (index 1, used for lookup by key).
  typedef multi_index::multi_index_container<
            std::pair<key_type, value_type>,
            multi_index::indexed_by<
              multi_index::sequenced<>,
              multi_index::hashed_unique<
                pair_first_extractor<key_type, value_type>
              >
            >
          > ghost_cells_type;

  /// Iterator into the ghost cells
  typedef typename ghost_cells_type::iterator iterator;

  /// Key-based index into the ghost cells
  typedef typename ghost_cells_type::template nth_index<1>::type
    ghost_cells_key_index_type;

  /// Iterator into the ghost cells (by key)
  typedef typename ghost_cells_key_index_type::iterator key_iterator;

  /** The property map category.  A distributed property map cannot be
   * an Lvalue Property Map, because values on remote processes cannot
   * be addressed.
   */
  typedef typename detail::make_nonlvalue_property_map<
    (is_base_and_derived<lvalue_property_map_tag, local_category>::value
     || is_same<lvalue_property_map_tag, local_category>::value)>
    ::template apply<local_category>::type category;

  /** Default-construct a distributed property map.  This function
   * creates an uninitialized property map that must be assigned to a
   * valid value before being used. It is only provided here because
   * property maps must be Default Constructible.
   */
  distributed_property_map() {}

  /** Construct a distributed property map.  Builds a distributed
   * property map communicating over the given process group and using
   * the given local property map for storage. Since no reduction
   * operation is provided, the default reduction operation @c
   * basic_reduce<value_type> is used.
   */
  distributed_property_map(const ProcessGroup& pg, const GlobalMap& global,
                           const StorageMap& pm)
    : data(new data_t(pg, global, pm, basic_reduce<value_type>(), false))
  {
    typedef handle_message<basic_reduce<value_type> > Handler;

    // Allocate the ghost-cell container, then register this map's
    // message handler (and its triggers) with the process group.
    data->ghost_cells.reset(new ghost_cells_type());
    Handler handler(data);
    data->process_group.replace_handler(handler, true);
    data->process_group.template get_receiver<Handler>()
      ->setup_triggers(data->process_group);
  }

  /** Construct a distributed property map.  Builds a distributed
   * property map communicating over the given process group and using
   * the given local property map for storage. The given @p reduce
   * parameter is used as the reduction operation.
   */
  template<typename Reduce>
  distributed_property_map(const ProcessGroup& pg, const GlobalMap& global,
                           const StorageMap& pm,
                           const Reduce& reduce);

  ~distributed_property_map();

  /// Set the reduce operation of the distributed property map.
  template<typename Reduce>
  void set_reduce(const Reduce& reduce);

  // Set the consistency model for the distributed property map
  void set_consistency_model(int model);

  // Get the consistency model
  int get_consistency_model() const { return data->model; }

  // Set the maximum number of ghost cells that we are allowed to
  // maintain. If 0, all ghost cells will be retained.
  void set_max_ghost_cells(std::size_t max_ghost_cells);

  // Clear out all ghost cells
  void clear();

  // Reset the values in all ghost cells to the default value
  void reset();

  // Flush all values destined for remote processors
  void flush();

  /// Access the value for the given key: a reference into the local
  /// storage map for a locally-owned key, otherwise a reference to
  /// the key's ghost cell.
  reference operator[](const key_type& key) const
  {
    owner_local_pair p = get(data->global, key);

    if (p.first == process_id(data->process_group)) {
      return data->storage[p.second];
    } else {
      return cell(key);
    }
  }

  /// The process group over which this property map communicates.
  process_group_type process_group() const
  {
    return data->process_group.base();
  }

  /// Access the underlying local storage map.
  StorageMap&       base()       { return data->storage; }
  const StorageMap& base() const { return data->storage; }

  /** Sends a "put" request.
   * \internal
   *
   */
  void
  request_put(process_id_type p, const key_type& k, const value_type& v) const
  {
    send(data->process_group, p, property_map_put,
         boost::parallel::detail::make_untracked_pair(k, v));
  }

  /** Access the ghost cell for the given key.
   * \internal
   */
  value_type& cell(const key_type& k, bool request_if_missing = true) const;

  /** Perform synchronization
   * \internal
   */
  void do_synchronize();

  /// Access the global (ownership-resolving) property map.
  const GlobalMap& global() const { return data->global; }
  GlobalMap&       global()       { return data->global; }

  /// Shared state of the distributed property map; all copies of a
  /// given map share one data_t instance through a shared_ptr.
  struct data_t
  {
    data_t(const ProcessGroup& pg, const GlobalMap& global,
           const StorageMap& pm, const function1<value_type, key_type>& dv,
           bool has_default_resolver)
      : process_group(pg), global(global), storage(pm),
        ghost_cells(), max_ghost_cells(1000000), get_default_value(dv),
        has_default_resolver(has_default_resolver), model(cm_forward) { }

    /// The process group
    ProcessGroup process_group;

    /// A mapping from the keys of this property map to the global
    /// descriptor.
    GlobalMap global;

    /// Local property map
    StorageMap storage;

    /// The ghost cells
    shared_ptr<ghost_cells_type> ghost_cells;

    /// The maximum number of ghost cells we are permitted to hold. If
    /// zero, we are permitted to have an infinite number of ghost
    /// cells.
    std::size_t max_ghost_cells;

    /// Default value for remote ghost cells, as defined by the
    /// reduction operation.
    function1<value_type, key_type> get_default_value;

    /// True if this resolver is the "default" resolver, meaning that
    /// we should not be able to get() a default value; it needs to be
    /// request()ed first.
    bool has_default_resolver;

    // Current consistency model
    int model;

    // Function that resets all of the ghost cells to their default
    // values. It knows the type of the resolver, so we can eliminate
    // a large number of calls through function pointers.
    void (data_t::*reset)();

    // Clear out all ghost cells
    void clear();

    // Flush all values destined for remote processors
    void flush();

    // Send out requests to "refresh" the values of ghost cells that
    // we're holding.
    void refresh_ghost_cells();

  private:
    template<typename Resolver> void do_reset();

    friend class distributed_property_map;
  };
  friend struct data_t;

  /// The shared state for this map (see data_t above).
  shared_ptr<data_t> data;

 private:
  // Prunes the least recently used ghost cells until we have @c
  // max_ghost_cells or fewer ghost cells.
  void prune_ghost_cells() const;

  /** Handles incoming messages.
   *
   * This function object is responsible for handling all incoming
   * messages for the distributed property map.
   */
  template<typename Reduce>
  struct handle_message
  {
    explicit handle_message(const shared_ptr<data_t>& data,
                            const Reduce& reduce = Reduce())
      : data_ptr(data), reduce(reduce) { }

    void operator()(process_id_type source, int tag);

    /// Individual message handlers
    void
    handle_put(int source, int tag,
               const boost::parallel::detail::untracked_pair<key_type, value_type>& data,
               trigger_receive_context);

    value_type
    handle_get(int source, int tag, const key_type& data,
               trigger_receive_context);

    void
    handle_multiget(int source, int tag,
                    const std::vector<key_type>& data,
                    trigger_receive_context);

    void
    handle_multiget_reply
      (int source, int tag,
       const std::vector<boost::parallel::detail::untracked_pair<key_type, value_type> >& msg,
       trigger_receive_context);

    void
    handle_multiput
      (int source, int tag,
       const std::vector<unsafe_pair<local_key_type, value_type> >& data,
       trigger_receive_context);

    void setup_triggers(process_group_type& pg);

  private:
    // Held weakly so a registered handler does not keep the map's
    // shared state alive on its own.
    weak_ptr<data_t> data_ptr;
    Reduce reduce;
  };

  /* Sets up the next stage in a multi-stage synchronization, for
     bidirectional consistency. */
  struct on_synchronize
  {
    explicit on_synchronize(const shared_ptr<data_t>& data) : data_ptr(data) { }

    void operator()();

  private:
    weak_ptr<data_t> data_ptr;
  };
};
0541 
0542 /* An implementation helper macro for the common case of naming
0543    distributed property maps with all of the normal template
0544    parameters. */
0545 #define PBGL_DISTRIB_PMAP                                       \
0546   distributed_property_map<ProcessGroup, GlobalMap, StorageMap>
0547 
0548 /* Request that the value for the given remote key be retrieved in
0549    the next synchronization round. */
0550 template<typename ProcessGroup, typename GlobalMap, typename StorageMap>
0551 inline void
0552 request(const PBGL_DISTRIB_PMAP& pm,
0553         typename PBGL_DISTRIB_PMAP::key_type const& key)
0554 {
0555   if (get(pm.data->global, key).first != process_id(pm.data->process_group))
0556     pm.cell(key, false);
0557 }
0558 
0559 /** Get the value associated with a particular key.  Retrieves the
0560  * value associated with the given key. If the key denotes a
0561  * locally-owned object, it returns the value from the local property
0562  * map; if the key denotes a remotely-owned object, retrieves the
0563  * value of the ghost cell for that key, which may be the default
0564  * value provided by the reduce operation.
0565  *
0566  * Complexity: For a local key, O(1) get operations on the underlying
0567  * property map. For a non-local key, O(1) accesses to the ghost cells.
0568  */
0569 template<typename ProcessGroup, typename GlobalMap, typename StorageMap>
0570 inline
0571 typename PBGL_DISTRIB_PMAP::value_type
0572 get(const PBGL_DISTRIB_PMAP& pm,
0573     typename PBGL_DISTRIB_PMAP::key_type const& key)
0574 {
0575   using boost::get;
0576 
0577   typename property_traits<GlobalMap>::value_type p =
0578     get(pm.data->global, key);
0579 
0580   if (p.first == process_id(pm.data->process_group)) {
0581     return get(pm.data->storage, p.second);
0582   } else {
0583     return pm.cell(key);
0584   }
0585 }
0586 
0587 /** Put a value associated with the given key into the property map.
0588  * When the key denotes a locally-owned object, this operation updates
0589  * the underlying local property map. Otherwise, the local ghost cell
0590  * is updated and a "put" message is sent to the processor owning this
0591  * key.
0592  *
0593  * Complexity: For a local key, O(1) put operations on the underlying
0594  * property map. For a nonlocal key, O(1) accesses to the ghost cells
0595  * and will send O(1) messages of size O(sizeof(key) + sizeof(value)).
0596  */
0597 template<typename ProcessGroup, typename GlobalMap, typename StorageMap>
0598 void
0599 put(const PBGL_DISTRIB_PMAP& pm,
0600     typename PBGL_DISTRIB_PMAP::key_type const & key,
0601     typename PBGL_DISTRIB_PMAP::value_type const & value)
0602 {
0603   using boost::put;
0604 
0605   typename property_traits<GlobalMap>::value_type p =
0606     get(pm.data->global, key);
0607 
0608   if (p.first == process_id(pm.data->process_group)) {
0609     put(pm.data->storage, p.second, value);
0610   } else {
0611     if (pm.data->model & cm_forward)
0612       pm.request_put(p.first, key, value);
0613 
0614     pm.cell(key, false) = value;
0615   }
0616 }
0617 
0618 /** Put a value associated with a given key into the local view of the
0619  * property map. This operation is equivalent to @c put, but with one
0620  * exception: no message will be sent to the owning processor in the
0621  * case of a remote update. The effect is that any value written via
0622  * @c local_put for a remote key may be overwritten in the next
0623  * synchronization round.
0624  */
0625 template<typename ProcessGroup, typename GlobalMap, typename StorageMap>
0626 void
0627 local_put(const PBGL_DISTRIB_PMAP& pm,
0628           typename PBGL_DISTRIB_PMAP::key_type const & key,
0629           typename PBGL_DISTRIB_PMAP::value_type const & value)
0630 {
0631   using boost::put;
0632 
0633   typename property_traits<GlobalMap>::value_type p =
0634     get(pm.data->global, key);
0635 
0636   if (p.first == process_id(pm.data->process_group))
0637     put(pm.data->storage, p.second, value);
0638   else pm.cell(key, false) = value;
0639 }
0640 
0641 /** Cache the value associated with the given remote key. If the key
0642  *  is local, ignore the operation. */
0643 template<typename ProcessGroup, typename GlobalMap, typename StorageMap>
0644 inline void
0645 cache(const PBGL_DISTRIB_PMAP& pm,
0646       typename PBGL_DISTRIB_PMAP::key_type const & key,
0647       typename PBGL_DISTRIB_PMAP::value_type const & value)
0648 {
0649   typename ProcessGroup::process_id_type id = get(pm.data->global, key).first;
0650 
0651   if (id != process_id(pm.data->process_group)) pm.cell(key, false) = value;
0652 }
0653 
/// Synchronize the property map. Per the class documentation, this
/// must be called in all processes to bring out-of-date values back
/// into agreement.
template<typename ProcessGroup, typename GlobalMap, typename StorageMap>
void
synchronize(PBGL_DISTRIB_PMAP& pm)
{
  // Delegate to the member implementation (defined in the .ipp file).
  pm.do_synchronize();
}
0661 
0662 /// Create a distributed property map.
0663 template<typename ProcessGroup, typename GlobalMap, typename StorageMap>
0664 inline distributed_property_map<ProcessGroup, GlobalMap, StorageMap>
0665 make_distributed_property_map(const ProcessGroup& pg, GlobalMap global,
0666                               StorageMap storage)
0667 {
0668   typedef distributed_property_map<ProcessGroup, GlobalMap, StorageMap>
0669     result_type;
0670   return result_type(pg, global, storage);
0671 }
0672 
0673 /**
0674  * \overload
0675  */
0676 template<typename ProcessGroup, typename GlobalMap, typename StorageMap,
0677          typename Reduce>
0678 inline distributed_property_map<ProcessGroup, GlobalMap, StorageMap>
0679 make_distributed_property_map(const ProcessGroup& pg, GlobalMap global,
0680                               StorageMap storage, Reduce reduce)
0681 {
0682   typedef distributed_property_map<ProcessGroup, GlobalMap, StorageMap>
0683     result_type;
0684   return result_type(pg, global, storage, reduce);
0685 }
0686 
0687 } } // end namespace boost::parallel
0688 
0689 #include <boost/property_map/parallel/impl/distributed_property_map.ipp>
0690 
0691 #undef PBGL_DISTRIB_PMAP
0692 
0693 #endif // BOOST_PARALLEL_DISTRIBUTED_PROPERTY_MAP_HPP