
concurrent_hash_map.h

/*
    Copyright 2005-2008 Intel Corporation.  All Rights Reserved.

    The source code contained or described herein and all documents related
    to the source code ("Material") are owned by Intel Corporation or its
    suppliers or licensors.  Title to the Material remains with Intel
    Corporation or its suppliers and licensors.  The Material is protected
    by worldwide copyright laws and treaty provisions.  No part of the
    Material may be used, copied, reproduced, modified, published, uploaded,
    posted, transmitted, distributed, or disclosed in any way without
    Intel's prior express written permission.

    No license under any patent, copyright, trade secret or other
    intellectual property right is granted to or conferred upon you by
    disclosure or delivery of the Materials, either expressly, by
    implication, inducement, estoppel or otherwise.  Any license under such
    intellectual property rights must be express and approved by Intel in
    writing.
*/

#ifndef __TBB_concurrent_hash_map_H
#define __TBB_concurrent_hash_map_H

#include <stdexcept>
#include <iterator>
#include <utility>      // Need std::pair from here
#include <cstring>      // Need memset from here
#include "tbb_stddef.h"
#include "cache_aligned_allocator.h"
#include "tbb_allocator.h"
#include "spin_rw_mutex.h"
#include "atomic.h"
#include "aligned_space.h"
#if TBB_PERFORMANCE_WARNINGS
#include <typeinfo>
#endif

namespace tbb {

template<typename Key, typename T, typename HashCompare, typename A = tbb_allocator<std::pair<Key, T> > >
class concurrent_hash_map;

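// Editor's sketch (not part of the original header): the HashCompare
// parameter must supply hash() and equal(); those are the only operations
// concurrent_hash_map performs on my_hash_compare below. A minimal
// HashCompare for std::string keys might look like this (the name
// MyHashCompare and the hash formula are illustrative, not library API):
//
//     struct MyHashCompare {
//         static size_t hash( const std::string& s ) {
//             size_t h = 0;
//             for( const char* c = s.c_str(); *c; ++c )
//                 h = h*3 + size_t(*c);       // simple multiplicative hash
//             return h;
//         }
//         static bool equal( const std::string& a, const std::string& b ) {
//             return a==b;
//         }
//     };
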
namespace internal {
    //! Base class of concurrent_hash_map: types and constants that do not depend on the template parameters.
    class hash_map_base {
    public:
        // Mutex types for each layer of the container
        typedef spin_rw_mutex node_mutex_t;
        typedef spin_rw_mutex chain_mutex_t;
        typedef spin_rw_mutex segment_mutex_t;

        //! Type of a hash code.
        typedef size_t hashcode_t;
        //! Log2 of the number of segments.
        static const size_t n_segment_bits = 6;
        //! Number of segments.
        static const size_t n_segment = size_t(1)<<n_segment_bits;
        //! Maximum size of a segment's array of chains.
        static const size_t max_physical_size = size_t(1)<<(8*sizeof(hashcode_t)-n_segment_bits);
    };
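
    // Editor's note (not in the original header): a hash code is consumed in
    // two pieces. With n_segment_bits = 6, the low 6 bits select one of the
    // 64 segments and the higher bits select a chain within that segment:
    //
    //     hashcode_t h = my_hash_compare.hash(key);
    //     size_t segment_idx = h & (n_segment-1);                   // low bits
    //     size_t chain_idx = (h>>n_segment_bits) & (s.my_physical_size-1);
    //
    // This matches get_segment() and segment::get_chain() further below, and
    // explains max_physical_size: only 8*sizeof(hashcode_t)-n_segment_bits
    // bits remain for choosing a chain.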

    template<typename Iterator>
    class hash_map_range;

    struct hash_map_segment_base {
        //! Mutex protecting this segment.
        hash_map_base::segment_mutex_t my_mutex;

        // Number of nodes in the segment
        atomic<size_t> my_logical_size;

        // Number of chains (size of the segment's chain array)
        size_t my_physical_size;

        //! True if the segment should grow; first check of the double-check growth sequence.
        bool internal_grow_predicate() const;
    };

    //! Meets requirements of a forward iterator for STL.
    /** Value is either the T or const T type of the container. */
    template<typename Container, typename Value>
    class hash_map_iterator
#if defined(_WIN64) && defined(_MSC_VER) 
        // Ensure that Microsoft's internal template function _Val_type works correctly.
        : public std::iterator<std::forward_iterator_tag,Value>
#endif /* defined(_WIN64) && defined(_MSC_VER) */
    {
        typedef typename Container::node node;
        typedef typename Container::chain chain;

        //! The map over which we are iterating.
        Container* my_table;

        //! Pointer to the node that holds the current item.
        node* my_node;

        //! Index into the current segment's array of chains.
        size_t my_array_index;

        //! Index of the current segment.
        size_t my_segment_index;

        template<typename C, typename T, typename U>
        friend bool operator==( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

        template<typename C, typename T, typename U>
        friend bool operator!=( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

        template<typename C, typename T, typename U>
        friend ptrdiff_t operator-( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

        template<typename C, typename U>
        friend class internal::hash_map_iterator;

        template<typename I>
        friend class internal::hash_map_range;

        //! Advance to the first node of the next nonempty chain, possibly crossing segments.
        void advance_to_next_node() {
            size_t i = my_array_index+1;
            do {
                while( i<my_table->my_segment[my_segment_index].my_physical_size ) {
                    my_node = my_table->my_segment[my_segment_index].my_array[i].node_list;
                    if( my_node ) goto done;
                    ++i;
                }
                i = 0;
            } while( ++my_segment_index<my_table->n_segment );
        done:
            my_array_index = i;
        }
#if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
        template<typename Key, typename T, typename HashCompare, typename A>
        friend class tbb::concurrent_hash_map;
#else
    public: // workaround
#endif
        hash_map_iterator( const Container& table, size_t segment_index, size_t array_index=0, node* b=NULL );
    public:
        //! Construct undefined iterator.
        hash_map_iterator() {}
        //! Conversion from a non-const iterator.
        hash_map_iterator( const hash_map_iterator<Container,typename Container::value_type>& other ) :
            my_table(other.my_table),
            my_node(other.my_node),
            my_array_index(other.my_array_index),
            my_segment_index(other.my_segment_index)
        {}
        Value& operator*() const {
            __TBB_ASSERT( my_node, "iterator uninitialized or at end of container?" );
            return my_node->item;
        }
        Value* operator->() const {return &operator*();}
        hash_map_iterator& operator++();

        //! Post increment; returns a pointer to the old item rather than an iterator copy.
        Value* operator++(int) {
            Value* result = &operator*();
            operator++();
            return result;
        }

        // STL support

        typedef ptrdiff_t difference_type;
        typedef Value value_type;
        typedef Value* pointer;
        typedef Value& reference;
        typedef const Value& const_reference;
        typedef std::forward_iterator_tag iterator_category;
    };

    template<typename Container, typename Value>
    hash_map_iterator<Container,Value>::hash_map_iterator( const Container& table, size_t segment_index, size_t array_index, node* b ) : 
        my_table(const_cast<Container*>(&table)),
        my_node(b),
        my_array_index(array_index),
        my_segment_index(segment_index)
    {
        if( segment_index<my_table->n_segment ) {
            if( !my_node ) {
                chain* first_chain = my_table->my_segment[segment_index].my_array;
                if( first_chain ) my_node = first_chain[my_array_index].node_list;
            }
            if( !my_node ) advance_to_next_node();
        }
    }

    template<typename Container, typename Value>
    hash_map_iterator<Container,Value>& hash_map_iterator<Container,Value>::operator++() {
        my_node=my_node->next;
        if( !my_node ) advance_to_next_node();
        return *this;
    }

    template<typename Container, typename T, typename U>
    bool operator==( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
        return i.my_node==j.my_node;
    }

    template<typename Container, typename T, typename U>
    bool operator!=( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
        return i.my_node!=j.my_node;
    }

    //! Range class used with concurrent_hash_map; models the TBB Range concept.
    template<typename Iterator>
    class hash_map_range {
    private:
        Iterator my_begin;
        Iterator my_end;
        mutable Iterator my_midpoint;
        size_t my_grainsize;
        //! Set my_midpoint to the approximate midpoint of the range.
        void set_midpoint() const;
        template<typename U> friend class hash_map_range;
    public:
        //! Type for the size of a range.
        typedef std::size_t size_type;
        typedef typename Iterator::value_type value_type;
        typedef typename Iterator::reference reference;
        typedef typename Iterator::const_reference const_reference;
        typedef typename Iterator::difference_type difference_type;
        typedef Iterator iterator;

        //! True if range is empty.
        bool empty() const {return my_begin==my_end;}

        //! True if range can be partitioned into two subranges.
        bool is_divisible() const {
            return my_midpoint!=my_end;
        }
        //! Split range.
        hash_map_range( hash_map_range& r, split ) : 
            my_end(r.my_end),
            my_grainsize(r.my_grainsize)
        {
            r.my_end = my_begin = r.my_midpoint;
            set_midpoint();
            r.set_midpoint();
        }
        //! Type conversion (e.g. from a non-const range to a const range).
        template<typename U>
        hash_map_range( hash_map_range<U>& r) : 
            my_begin(r.my_begin),
            my_end(r.my_end),
            my_midpoint(r.my_midpoint),
            my_grainsize(r.my_grainsize)
        {}
        //! Build range from a pair of iterators.
        hash_map_range( const Iterator& begin_, const Iterator& end_, size_type grainsize = 1 ) : 
            my_begin(begin_), 
            my_end(end_), 
            my_grainsize(grainsize) 
        {
            set_midpoint();
            __TBB_ASSERT( grainsize>0, "grainsize must be positive" );
        }
        const Iterator& begin() const {return my_begin;}
        const Iterator& end() const {return my_end;}
        //! The grain size for this range.
        size_type grainsize() const {return my_grainsize;}
    };
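
    // Editor's sketch (not part of the original header): because
    // hash_map_range models the TBB Range concept (splitting constructor,
    // empty(), is_divisible()), a table can be processed by
    // tbb::parallel_for. MyMap, MyHashCompare (sketched near the forward
    // declaration above), and process() are illustrative assumptions:
    //
    //     typedef tbb::concurrent_hash_map<std::string,int,MyHashCompare> MyMap;
    //
    //     struct Body {
    //         void operator()( const MyMap::range_type& r ) const {
    //             for( MyMap::iterator i=r.begin(); i!=r.end(); ++i )
    //                 process( i->second );    // hypothetical per-item work
    //         }
    //     };
    //
    //     tbb::parallel_for( table.range( /*grainsize=*/16 ), Body() );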

    template<typename Iterator>
    void hash_map_range<Iterator>::set_midpoint() const {
        size_t n = my_end.my_segment_index-my_begin.my_segment_index;
        if( n>1 || (n==1 && my_end.my_array_index>0) ) {
            // Split by groups of segments
            my_midpoint = Iterator(*my_begin.my_table,(my_end.my_segment_index+my_begin.my_segment_index)/2u);
        } else {
            // Split by groups of nodes
            size_t m = my_end.my_array_index-my_begin.my_array_index;
            if( m>my_grainsize ) {
                my_midpoint = Iterator(*my_begin.my_table,my_begin.my_segment_index,m/2u);
            } else {
                my_midpoint = my_end;
            }
        }
        __TBB_ASSERT( my_midpoint.my_segment_index<=my_begin.my_table->n_segment, NULL );
    }
} // namespace internal

//! Unordered map from Key to T, safe for concurrent access.
/** The HashCompare argument must supply methods hash(key) and equal(a,b);
    those are the only operations this class performs on my_hash_compare. */
template<typename Key, typename T, typename HashCompare, typename A>
class concurrent_hash_map : protected internal::hash_map_base {
    template<typename Container, typename Value>
    friend class internal::hash_map_iterator;

    template<typename I>
    friend class internal::hash_map_range;

    struct node;
    friend struct node;
    typedef typename A::template rebind<node>::other node_allocator_type;

public:
    class const_accessor;
    friend class const_accessor;
    class accessor;

    typedef Key key_type;
    typedef T mapped_type;
    typedef std::pair<const Key,T> value_type;
    typedef size_t size_type;
    typedef ptrdiff_t difference_type;
    typedef value_type *pointer;
    typedef const value_type *const_pointer;
    typedef value_type &reference;
    typedef const value_type &const_reference;
    typedef internal::hash_map_iterator<concurrent_hash_map,value_type> iterator;
    typedef internal::hash_map_iterator<concurrent_hash_map,const value_type> const_iterator;
    typedef internal::hash_map_range<iterator> range_type;
    typedef internal::hash_map_range<const_iterator> const_range_type;
    typedef A allocator_type;

    //! Combines data access, locking, and garbage collection.
    class const_accessor {
        friend class concurrent_hash_map;
        friend class accessor;
        void operator=( const accessor& ) const; // Deny access
        const_accessor( const accessor& );       // Deny access
    public:
        //! Type of value.
        typedef const std::pair<const Key,T> value_type;

        //! True if the accessor does not hold a reference to any item.
        bool empty() const {return !my_node;}

        //! Release the held item and its lock.
        void release() {
            if( my_node ) {
                my_lock.release();
                my_node = NULL;
            }
        }

        //! Return reference to the associated item.
        const_reference operator*() const {
            __TBB_ASSERT( my_node, "attempt to dereference empty accessor" );
            return my_node->item;
        }

        //! Return pointer to the associated item.
        const_pointer operator->() const {
            return &operator*();
        }

        //! Create an empty accessor.
        const_accessor() : my_node(NULL) {}

        //! Destroy the accessor, releasing any held lock.
        ~const_accessor() {
            my_node = NULL; // my_lock.release() is called in the scoped_lock destructor
        }
    private:
        node* my_node;
        node_mutex_t::scoped_lock my_lock;
        hashcode_t my_hash;
    };

    //! Accessor that additionally allows write access to the item.
    class accessor: public const_accessor {
    public:
        //! Type of value.
        typedef std::pair<const Key,T> value_type;

        //! Return reference to the associated item.
        reference operator*() const {
            __TBB_ASSERT( this->my_node, "attempt to dereference empty accessor" );
            return this->my_node->item;
        }

        //! Return pointer to the associated item.
        pointer operator->() const {
            return &operator*();
        }
    };
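
    // Editor's sketch (not part of the original header): typical accessor
    // usage. An accessor holds the item's lock until it is destroyed or
    // release()d, so keep its scope tight. MyMap and use() are illustrative:
    //
    //     MyMap table;
    //     {
    //         MyMap::accessor a;             // grants exclusive (write) access
    //         table.insert( a, key );        // finds or creates the item
    //         a->second += 1;                // safe update under the item lock
    //     }                                  // lock released when a is destroyed
    //
    //     MyMap::const_accessor ca;          // grants shared (read) access
    //     if( table.find( ca, key ) )
    //         use( ca->second );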

    //! Construct an empty table.
    concurrent_hash_map(const allocator_type &a = allocator_type())
        : my_allocator(a)
    {
        initialize();
    }

    //! Copy constructor.
    concurrent_hash_map( const concurrent_hash_map& table, const allocator_type &a = allocator_type())
        : my_allocator(a)
    {
        initialize();
        internal_copy(table);
    }

    //! Construct a table with a copy of [first, last).
    template<typename I>
    concurrent_hash_map(I first, I last, const allocator_type &a = allocator_type())
        : my_allocator(a)
    {
        initialize();
        internal_copy(first, last);
    }

    //! Assignment.
    concurrent_hash_map& operator=( const concurrent_hash_map& table ) {
        if( this!=&table ) {
            clear();
            internal_copy(table);
        }
        return *this;
    }

    //! Clear the table.
    void clear();

    //! Destroy the table.
    ~concurrent_hash_map();

    //------------------------------------------------------------------------
    // Parallel algorithm support
    //------------------------------------------------------------------------
    range_type range( size_type grainsize=1 ) {
        return range_type( begin(), end(), grainsize );
    }
    const_range_type range( size_type grainsize=1 ) const {
        return const_range_type( begin(), end(), grainsize );
    }

    //------------------------------------------------------------------------
    // STL support - not thread-safe methods
    //------------------------------------------------------------------------
    iterator begin() {return iterator(*this,0);}
    iterator end() {return iterator(*this,n_segment);}
    const_iterator begin() const {return const_iterator(*this,0);}
    const_iterator end() const {return const_iterator(*this,n_segment);}
    std::pair<iterator, iterator> equal_range( const Key& key ) { return internal_equal_range(key, end()); }
    std::pair<const_iterator, const_iterator> equal_range( const Key& key ) const { return internal_equal_range(key, end()); }

    //! Number of items in the table.
    /** Sums per-segment counts without locking, so the result may be stale
        while concurrent insertions or erasures are in flight. */
    size_type size() const;

    //! True if the table is empty.
    bool empty() const;

    //! Upper bound on the number of items the table can hold.
    size_type max_size() const {return (~size_type(0))/sizeof(node);}

    //! Return a copy of the allocator.
    allocator_type get_allocator() const { return this->my_allocator; }

    //! Swap contents with another table; not thread-safe.
    void swap(concurrent_hash_map &table);

    //------------------------------------------------------------------------
    // concurrent map operations
    //------------------------------------------------------------------------

    //! Return number of items with the given key (0 or 1).
    size_type count( const Key& key ) const {
        return const_cast<concurrent_hash_map*>(this)->lookup</*insert*/false>(NULL, key, /*write=*/false, NULL );
    }

    //! Find item with the given key and acquire a read lock on it.
    /** Returns true if the item was found. */
    bool find( const_accessor& result, const Key& key ) const {
        return const_cast<concurrent_hash_map*>(this)->lookup</*insert*/false>(&result, key, /*write=*/false, NULL );
    }

    //! Find item with the given key and acquire a write lock on it.
    /** Returns true if the item was found. */
    bool find( accessor& result, const Key& key ) {
        return lookup</*insert*/false>(&result, key, /*write=*/true, NULL );
    }

    //! Insert item (if not already present) with the given key and acquire a read lock on it.
    /** Returns true if a new item was inserted. */
    bool insert( const_accessor& result, const Key& key ) {
        return lookup</*insert*/true>(&result, key, /*write=*/false, NULL );
    }

    //! Insert item (if not already present) with the given key and acquire a write lock on it.
    /** Returns true if a new item was inserted. */
    bool insert( accessor& result, const Key& key ) {
        return lookup</*insert*/true>(&result, key, /*write=*/true, NULL );
    }

    //! Insert the given value (if its key is not already present) and acquire a read lock on the item.
    /** Returns true if a new item was inserted. */
    bool insert( const_accessor& result, const value_type& value ) {
        return lookup</*insert*/true>(&result, value.first, /*write=*/false, &value.second );
    }

    //! Insert the given value (if its key is not already present) and acquire a write lock on the item.
    /** Returns true if a new item was inserted. */
    bool insert( accessor& result, const value_type& value ) {
        return lookup</*insert*/true>(&result, value.first, /*write=*/true, &value.second );
    }

    //! Insert the given value if its key is not already present.
    /** Returns true if a new item was inserted. */
    bool insert( const value_type& value ) {
        return lookup</*insert*/true>(NULL, value.first, /*write=*/false, &value.second );
    }

    //! Insert a range [first, last).
    template<typename I>
    void insert(I first, I last) {
        for(; first != last; ++first)
            insert( *first );
    }

    //! Erase item with the given key.
    /** Returns true if an item was erased. */
    bool erase( const Key& key );

    //! Erase the item pointed to by the given accessor, releasing the accessor.
    /** Returns true if this thread erased the item. */
    bool erase( const_accessor& item_accessor ) {
        return exclude( item_accessor, /*readonly=*/ true );
    }

    //! Erase the item pointed to by the given accessor, releasing the accessor.
    /** Returns true if this thread erased the item. */
    bool erase( accessor& item_accessor ) {
        return exclude( item_accessor, /*readonly=*/ false );
    }

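    // Editor's note (not in the original header): every insert overload above
    // returns true only when a *new* item was created; inserting an existing
    // key returns false but, for the accessor forms, still locks the existing
    // item. A common update idiom (sketch; MyMap is illustrative):
    //
    //     MyMap::accessor a;
    //     if( !table.insert( a, std::make_pair(key, 1) ) )
    //         ++a->second;   // key already present: update under the write lock
    //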
private:
    //! Basic unit of storage: one key/value pair plus a per-item mutex.
    struct node {
        node* next;
        node_mutex_t mutex;
        value_type item;
        node( const Key& key ) : item(key, T()) {}
        node( const Key& key, const T& t ) : item(key, t) {}
        // Exception-safe allocation; see C++ Standard 2003, clause 5.3.4p17.
        void* operator new( size_t size, node_allocator_type& a ) {
            void *ptr = a.allocate(1);
            if(!ptr) throw std::bad_alloc();
            return ptr;
        }
        // Matching placement delete, called only if the constructor above throws.
        void operator delete( void* ptr, node_allocator_type& a ) {return a.deallocate(static_cast<node*>(ptr),1); }
    };

    struct chain;
    friend struct chain;

    //! A hash bucket: a linked list of nodes protected by its own mutex.
    struct chain {
        void push_front( node& b ) {
            b.next = node_list;
            node_list = &b;
        }
        chain_mutex_t mutex;
        node* node_list;
    };

    struct segment;
    friend struct segment;

    //! One of n_segment portions of the table: a mutex plus an array of chains.
    struct segment: internal::hash_map_segment_base {
#if TBB_DO_ASSERT
        ~segment() {
            __TBB_ASSERT( !my_array, "should have been cleared earlier" );
        }
#endif /* TBB_DO_ASSERT */

        // Pointer to array of chains
        chain* my_array;

        // Get chain in this segment that corresponds to given hash code.
        chain& get_chain( hashcode_t hashcode, size_t n_segment_bits ) {
            return my_array[(hashcode>>n_segment_bits)&(my_physical_size-1)];
        }

        //! Allocate an array of chains at least new_size long, rounded up to a power of two.
        void allocate_array( size_t new_size ) {
            size_t n=(internal::NFS_GetLineSize()+sizeof(chain)-1)/sizeof(chain);
            __TBB_ASSERT((n&(n-1))==0, NULL);
            while( n<new_size ) n<<=1;
            chain* array = cache_aligned_allocator<chain>().allocate( n );
            // Publishing my_physical_size early (with release semantics) helps concurrent
            // threads avoid false positives when deducing "bool grow" in lookup().
            __TBB_store_with_release(my_physical_size, n);
            memset( array, 0, n*sizeof(chain) );
            my_array = array;
        }
    };

    //! Return the segment that corresponds to the given hash code.
    segment& get_segment( hashcode_t hashcode ) {
        return my_segment[hashcode&(n_segment-1)];
    }

    node_allocator_type my_allocator;

    HashCompare my_hash_compare;

    segment* my_segment;

    node* create_node(const Key& key, const T* t) {
        // Exception-safe allocation and construction.
        if(t) return new( my_allocator ) node(key, *t);
        else  return new( my_allocator ) node(key);
    }

    void delete_node(node* b) {
        my_allocator.destroy(b);
        my_allocator.deallocate(b, 1);
    }

    node* search_list( const Key& key, chain& c ) const {
        node* b = c.node_list;
        while( b && !my_hash_compare.equal(key, b->item.first) )
            b = b->next;
        return b;
    }

    //! Return iterator range [lower, upper) around the item with the given key, or [end, end) if absent.
    template<typename I>
    std::pair<I, I> internal_equal_range( const Key& key, I end ) const;

    //! Unlink and delete the item held by the accessor; returns false if another thread erased it first.
    bool exclude( const_accessor& item_accessor, bool readonly );

    //! Grow segment s if its load factor is still too high; downgrades segment_lock to a reader lock.
    void grow_segment( segment_mutex_t::scoped_lock& segment_lock, segment& s );

    //! Workhorse shared by count, find, and insert.
    template<bool op_insert>
    bool lookup( const_accessor* result, const Key& key, bool write, const T* t );

    //! Allocate and zero the array of segments.
    void initialize() {
        my_segment = cache_aligned_allocator<segment>().allocate(n_segment);
        memset( my_segment, 0, sizeof(segment)*n_segment );
    }

    //! Copy the contents of another table into this (empty) table.
    void internal_copy( const concurrent_hash_map& source );

    template<typename I>
    void internal_copy(I first, I last);
};

template<typename Key, typename T, typename HashCompare, typename A>
concurrent_hash_map<Key,T,HashCompare,A>::~concurrent_hash_map() {
    clear();
    cache_aligned_allocator<segment>().deallocate( my_segment, n_segment );
}

template<typename Key, typename T, typename HashCompare, typename A>
typename concurrent_hash_map<Key,T,HashCompare,A>::size_type concurrent_hash_map<Key,T,HashCompare,A>::size() const {
    size_type result = 0;
    for( size_t k=0; k<n_segment; ++k )
        result += my_segment[k].my_logical_size;
    return result;
}

template<typename Key, typename T, typename HashCompare, typename A>
bool concurrent_hash_map<Key,T,HashCompare,A>::empty() const {
    for( size_t k=0; k<n_segment; ++k )
        if( my_segment[k].my_logical_size )
            return false;
    return true;
}

template<typename Key, typename T, typename HashCompare, typename A>
template<bool op_insert>
bool concurrent_hash_map<Key,T,HashCompare,A>::lookup( const_accessor* result, const Key& key, bool write, const T* t ) {
    if( result /*&& result->my_node -- checked in release() */ )
        result->release();
    const hashcode_t h = my_hash_compare.hash( key );
    segment& s = get_segment(h);
restart:
    bool return_value = false;
    // First check in the double-check sequence.
#if TBB_DO_THREADING_TOOLS||TBB_DO_ASSERT
    const bool grow = op_insert && s.internal_grow_predicate();
#else
    const bool grow = op_insert && s.my_logical_size >= s.my_physical_size
        && s.my_physical_size < max_physical_size; // check whether there are free bits
#endif /* TBB_DO_THREADING_TOOLS||TBB_DO_ASSERT */
    segment_mutex_t::scoped_lock segment_lock( s.my_mutex, /*write=*/grow );
    if( grow ) { // Load factor is too high
        grow_segment( segment_lock, s );
    }
    if( !s.my_array ) {
        __TBB_ASSERT( !op_insert, NULL );
        return false;
    }
    __TBB_ASSERT( (s.my_physical_size&(s.my_physical_size-1))==0, NULL );
    chain& c = s.get_chain( h, n_segment_bits );
    chain_mutex_t::scoped_lock chain_lock( c.mutex, /*write=*/false );

    node* b = search_list( key, c );
    if( op_insert ) {
        if( !b ) {
            // Search failed: create a node and insert it under a write lock on the chain.
            b = create_node(key, t);
            if( !chain_lock.upgrade_to_writer() ) {
                // Rerun search_list, in case another thread inserted the item during the upgrade.
                node* b_temp = search_list( key, c );
                if( b_temp ) { // unfortunately, it did
                    chain_lock.downgrade_to_reader();
                    delete_node( b );
                    b = b_temp;
                    goto done;
                }
            }
            ++s.my_logical_size; // cannot be incremented earlier without breaking size() correctness and equal()'s exception safety
            return_value = true;
            c.push_front( *b );
        }
    } else { // find or count
        if( !b )      return false;
        return_value = true;
    }
done:
    if( !result ) return return_value;
    if( !result->my_lock.try_acquire( b->mutex, write ) ) {
        // We are unlucky; prepare for a longer wait.
        internal::AtomicBackoff trials;
        do {
            if( !trials.bounded_pause() ) {
                // The wait is taking too long; release the outer locks and restart the operation.
                chain_lock.release(); segment_lock.release();
                __TBB_Yield();
                goto restart;
            }
        } while( !result->my_lock.try_acquire( b->mutex, write ) );
    }
    result->my_node = b;
    result->my_hash = h;
    return return_value;
}
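
// Editor's note (not in the original header): lookup() acquires locks in a
// fixed order -- segment, then chain, then item -- and never blocks
// indefinitely on the item mutex while holding the outer locks. If the item
// lock cannot be taken after a bounded backoff, both outer locks are released
// and the whole operation restarts from scratch, which prevents deadlock
// against a thread that holds the item lock while waiting for the chain or
// segment lock.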

template<typename Key, typename T, typename HashCompare, typename A>
template<typename I>
std::pair<I, I> concurrent_hash_map<Key,T,HashCompare,A>::internal_equal_range( const Key& key, I end ) const {
    hashcode_t h = my_hash_compare.hash( key );
    size_t segment_index = h&(n_segment-1);
    segment& s = my_segment[segment_index];
    if( !s.my_array )
        return std::make_pair(end, end);
    size_t chain_index = (h>>n_segment_bits)&(s.my_physical_size-1);
    chain& c = s.my_array[chain_index];
    node* b = search_list( key, c );
    if( !b )
        return std::make_pair(end, end);
    iterator lower(*this, segment_index, chain_index, b), upper(lower);
    return std::make_pair(lower, ++upper);
}

template<typename Key, typename T, typename HashCompare, typename A>
bool concurrent_hash_map<Key,T,HashCompare,A>::erase( const Key &key ) {
    hashcode_t h = my_hash_compare.hash( key );
    segment& s = get_segment( h );
    node* b;
    {
        bool chain_locked_for_write = false;
        segment_mutex_t::scoped_lock segment_lock( s.my_mutex, /*write=*/false );
        if( !s.my_array ) return false;
        __TBB_ASSERT( (s.my_physical_size&(s.my_physical_size-1))==0, NULL );
        chain& c = s.get_chain( h, n_segment_bits );
        chain_mutex_t::scoped_lock chain_lock( c.mutex, /*write=*/false );
    search:
        node** p = &c.node_list;
        b = *p;
        while( b && !my_hash_compare.equal(key, b->item.first ) ) {
            p = &b->next;
            b = *p;
        }
        if( !b ) return false;
        if( !chain_locked_for_write && !chain_lock.upgrade_to_writer() ) {
            // The upgrade briefly released the lock; we now hold a write lock but must re-search.
            chain_locked_for_write = true;
            goto search;
        }
        *p = b->next;
        --s.my_logical_size;
    }
    {
        // Wait for any thread that still holds the item's lock.
        node_mutex_t::scoped_lock item_locker( b->mutex, /*write=*/true );
    }
    // Note: no thread can be trying to acquire this mutex again, so do not try to upgrade a const_accessor!
    delete_node( b ); // Only one thread can delete it, due to the write lock on the chain mutex.
    return true;
}

template<typename Key, typename T, typename HashCompare, typename A>
bool concurrent_hash_map<Key,T,HashCompare,A>::exclude( const_accessor &item_accessor, bool readonly ) {
    __TBB_ASSERT( item_accessor.my_node, NULL );
    const hashcode_t h = item_accessor.my_hash;
    node *const b = item_accessor.my_node;
    item_accessor.my_node = NULL; // the accessor must be released in any case
    segment& s = get_segment( h );
    {
        segment_mutex_t::scoped_lock segment_lock( s.my_mutex, /*write=*/false );
        __TBB_ASSERT( s.my_array, NULL );
        __TBB_ASSERT( (s.my_physical_size&(s.my_physical_size-1))==0, NULL );
        chain& c = s.get_chain( h, n_segment_bits );
        chain_mutex_t::scoped_lock chain_lock( c.mutex, /*write=*/true );
        node** p = &c.node_list;
        while( *p && *p != b )
            p = &(*p)->next;
        if( !*p ) { // another thread unlinked the node first
            item_accessor.my_lock.release();
            return false;
        }
        __TBB_ASSERT( *p == b, NULL );
        *p = b->next;
        --s.my_logical_size;
    }
    if( readonly ) // need an exclusive lock before deleting
        item_accessor.my_lock.upgrade_to_writer(); // the return value means nothing here
    item_accessor.my_lock.release();
    delete_node( b ); // Only one thread can delete it, due to the write lock on the chain mutex.
    return true;
}

template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::swap(concurrent_hash_map<Key,T,HashCompare,A> &table) {
    std::swap(this->my_allocator, table.my_allocator);
    std::swap(this->my_hash_compare, table.my_hash_compare);
    std::swap(this->my_segment, table.my_segment);
}

template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::clear() {
#if TBB_PERFORMANCE_WARNINGS
    size_t total_physical_size = 0, min_physical_size = size_t(-1L), max_physical_size = 0; // usage statistics
    static bool reported = false;
#endif
    for( size_t i=0; i<n_segment; ++i ) {
        segment& s = my_segment[i];
        size_t n = s.my_physical_size;
        if( chain* array = s.my_array ) {
            s.my_array = NULL;
            s.my_physical_size = 0;
            s.my_logical_size = 0;
            for( size_t j=0; j<n; ++j ) {
                while( node* b = array[j].node_list ) {
                    array[j].node_list = b->next;
                    delete_node(b);
                }
            }
            cache_aligned_allocator<chain>().deallocate( array, n );
        }
#if TBB_PERFORMANCE_WARNINGS
        total_physical_size += n;
        if(min_physical_size > n) min_physical_size = n;
        if(max_physical_size < n) max_physical_size = n;
#endif
    }
#if TBB_PERFORMANCE_WARNINGS
    if( !reported
        && ( (total_physical_size >= n_segment*48 && min_physical_size < total_physical_size/n_segment/2)
         || (total_physical_size >= n_segment*128 && max_physical_size > total_physical_size/n_segment*2) ) )
    {
        reported = true;
        internal::runtime_warning(
            "Performance is not optimal because the hash function produces bad randomness in lower bits in %s",
            typeid(*this).name() );
    }
#endif
}

template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::grow_segment( segment_mutex_t::scoped_lock& segment_lock, segment& s ) {
    // The following is the second check in the double-check sequence.
    if( s.my_logical_size >= s.my_physical_size ) {
        chain* old_array = s.my_array;
        size_t old_size = s.my_physical_size;
        s.allocate_array( s.my_logical_size+1 );
        // Rehash: relink the existing nodes into the new, larger chain array.
        for( size_t k=0; k<old_size; ++k )
            while( node* b = old_array[k].node_list ) {
                old_array[k].node_list = b->next;
                hashcode_t h = my_hash_compare.hash( b->item.first );
                __TBB_ASSERT( &get_segment(h)==&s, "hash function changed?" );
                s.get_chain(h,n_segment_bits).push_front(*b);
            }
        cache_aligned_allocator<chain>().deallocate( old_array, old_size );
    }
    segment_lock.downgrade_to_reader();
}

template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::internal_copy( const concurrent_hash_map& source ) {
    for( size_t i=0; i<n_segment; ++i ) {
        segment& s = source.my_segment[i];
        __TBB_ASSERT( !my_segment[i].my_array, "caller should have cleared" );
        if( s.my_logical_size ) {
            segment& d = my_segment[i];
            d.allocate_array( s.my_logical_size );
            d.my_logical_size = s.my_logical_size;
            size_t s_size = s.my_physical_size;
            chain* s_array = s.my_array;
            chain* d_array = d.my_array;
            for( size_t k=0; k<s_size; ++k )
                for( node* b = s_array[k].node_list; b; b=b->next ) {
                    __TBB_ASSERT( &get_segment(my_hash_compare.hash( b->item.first ))==&d, "hash function changed?" );
                    node* b_new = create_node(b->item.first, &b->item.second);
                    d_array[k].push_front(*b_new); // the hash code and the segment and physical sizes match, so the chain index is the same
                }
        }
    }
}

template<typename Key, typename T, typename HashCompare, typename A>
template<typename I>
void concurrent_hash_map<Key,T,HashCompare,A>::internal_copy(I first, I last) {
    for(; first != last; ++first)
        insert( *first );
}

template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
inline bool operator==(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b) {
    if(a.size() != b.size()) return false;
    typename concurrent_hash_map<Key, T, HashCompare, A1>::const_iterator i(a.begin()), i_end(a.end());
    typename concurrent_hash_map<Key, T, HashCompare, A2>::const_iterator j, j_end(b.end());
    for(; i != i_end; ++i) {
        j = b.equal_range(i->first).first;
        if( j == j_end || !(i->second == j->second) ) return false;
    }
    return true;
}

template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
inline bool operator!=(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b)
{    return !(a == b); }

template<typename Key, typename T, typename HashCompare, typename A>
inline void swap(concurrent_hash_map<Key, T, HashCompare, A> &a, concurrent_hash_map<Key, T, HashCompare, A> &b)
{    a.swap( b ); }

} // namespace tbb

#endif /* __TBB_concurrent_hash_map_H */
