CVC3
/*****************************************************************************/
/*!
 *\file hash_table.h
 *\brief hash table implementation
 *
 * Author: Alexander Fuchs
 *
 * Created: Thu Oct 19 11:04:00 2006
 *
 * <hr>
 *
 * License to use, copy, modify, sell and/or distribute this software
 * and its documentation for any purpose is hereby granted without
 * royalty, subject to the terms and conditions defined in the \ref
 * LICENSE file provided with this distribution.
 *
 * <hr>
 */
/*****************************************************************************/

/*
 * Copyright (c) 1996,1997
 * Silicon Graphics Computer Systems, Inc.
 *
 * Permission to use, copy, modify, distribute and sell this software
 * and its documentation for any purpose is hereby granted without fee,
 * provided that the above copyright notice appear in all copies and
 * that both that copyright notice and this permission notice appear
 * in supporting documentation.  Silicon Graphics makes no
 * representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied warranty.
 *
 *
 * Copyright (c) 1994
 * Hewlett-Packard Company
 *
 * Permission to use, copy, modify, distribute and sell this software
 * and its documentation for any purpose is hereby granted without fee,
 * provided that the above copyright notice appear in all copies and
 * that both that copyright notice and this permission notice appear
 * in supporting documentation.  Hewlett-Packard Company makes no
 * representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied warranty.
 *
 */

// this implementation is in essence a subset of the SGI implementation:
// http://www.sgi.com/tech/stl/stl_hashtable.h

#ifndef _cvc3__hash__hash_table_h_
#define _cvc3__hash__hash_table_h_

#include <vector>
#include <string>
#include <functional>
#include <algorithm>
#include "hash_fun.h"
#include "os.h"

// For some reason, including debug.h doesn't work--so redeclare macros here

#ifdef _CVC3_DEBUG_MODE
#define DebugAssert(cond, str) if(!(cond)) \
  CVC3::debugError(__FILE__, __LINE__, #cond, str)
namespace CVC3 {
  extern CVC_DLL void debugError(const std::string& file, int line,
                                 const std::string& cond, const std::string& msg);
}
#else
#define DebugAssert(cond, str)
#endif

namespace Hash {
  // get size_t from hash_fun, which gets it from cstddef
  typedef size_t size_type;

  /// primes for increasing the hash table size

  // Note: assumes size_type is unsigned and at least 32 bits.
  const size_type num_primes = 28;

  static const size_type prime_list[num_primes] = {
    53ul,         97ul,         193ul,        389ul,        769ul,
    1543ul,       3079ul,       6151ul,       12289ul,      24593ul,
    49157ul,      98317ul,      196613ul,     393241ul,     786433ul,
    1572869ul,    3145739ul,    6291469ul,    12582917ul,   25165843ul,
    50331653ul,   100663319ul,  201326611ul,  402653189ul,  805306457ul,
    1610612741ul, 3221225473ul, 4294967291ul
  };

  inline size_type next_prime(size_type n)
  {
    const size_type* first = prime_list;
    const size_type* last = prime_list + (size_type)num_primes;
    const size_type* pos = std::lower_bound(first, last, n);
    return pos == last ? *(last - 1) : *pos;
  }


  /*! template to instantiate to hash map and hash set

    based on the SGI implementation:
    http://www.sgi.com/tech/stl/HashedAssociativeContainer.html

    _Key: hash key type
    _Data: key + value data to store
    _HashFcn: functional class providing a hash function: int(_Key)
      Note: in some STL implementations hash is already part of
      some extension and lives in namespace std or stdext, in some it is not.
      So we assume that it is not available. :TODO:
    _EqualKey: functional class providing a comparison function: bool(_Key, _Key);
      returns true iff two keys are considered to be equal
    _ExtractKey: extracts key from _Data: _Key(_Data)
  */
  //
  // can't use debug.h as debug.h uses hash maps...
  //
  // implemented as an array of lists (buckets)
  template <class _Key, class _Value,
            class _HashFcn, class _EqualKey, class _ExtractKey>
  class hash_table {

    /// types

  public:
    // interface typedefs
    typedef Hash::size_type size_type;
    typedef _Key key_type;
    typedef _Value value_type;
    typedef _HashFcn hasher;
    typedef _EqualKey key_equal;

  protected:
    // a bucket is a list of values.
    // using an STL list makes it more complicated to have a nice end iterator,
    // as iterators of different lists cannot be compared
    // (at least in the MS STL).
    // so instead we implement our own singly-linked list here,
    // where NULL is the end iterator for all lists.
    struct BucketNode {
      BucketNode(BucketNode* next, const value_type& value)
        : d_next(next), d_value(value)
      { };
      BucketNode* d_next;
      value_type d_value;
    };
    typedef BucketNode Bucket;

    // the buckets are kept in an array
    typedef std::vector<Bucket*> Data;
    typedef typename Data::iterator data_iter;
    typedef typename Data::const_iterator data_const_iter;


  public:
    // iterators
    class iterator;
    friend class iterator;
    class const_iterator;
    friend class const_iterator;



    /// variables

  protected:
    /// template parameters

    // the hash function for a key
    hasher d_hash;

    // the equality function between keys
    key_equal d_equal;

    // extraction of key from data
    _ExtractKey d_extractKey;


    // the current number of elements - stored for efficiency
    size_type d_size;

    // the hash table - an array of buckets
    Data d_data;


    /// methods

  protected:

    /// template parameters


    // the hash function for a key
    size_type hash(const key_type& key) const {
      return d_hash(key);
    }

    // equality between keys
    bool equal(const key_type& key1, const key_type& key2) const {
      return d_equal(key1, key2);
    }

    // extraction of a key
    const key_type& extractKey(const value_type& value) const {
      return d_extractKey(value);
    }


    /// bucket retrieval

    // returns the index in the array which contains the bucket
    // with the keys mapping to the same hash value as key
    size_type getBucketIndex(const key_type& key) const {
      return (hash(key) % d_data.size());
    }

    Bucket* getBucketByKey(const key_type& key) {
      return getBucketByIndex(getBucketIndex(key));
    }

    const Bucket* getBucketByKey(const key_type& key) const {
      return getBucketByIndex(getBucketIndex(key));
    }

    Bucket* getBucketByIndex(const size_type index) {
      DebugAssert(index < d_data.size(), "hash_table::getBucketByIndex");
      return d_data.at(index);
    }

    const Bucket* getBucketByIndex(const size_type index) const {
      DebugAssert(index < d_data.size(), "hash_table::getBucketByIndex (const)");
      return d_data.at(index);
    }



    /// resize

    // increase the size of the table, typically if the load factor is too high
    void resize() {
      if (load_factor() > 1) {
        // create a new table with doubled size
        //size_type size = 2 * d_data.size();
        // this is simple, but might not be efficient for bad hash functions,
        // which for example mainly hash to values which contain 2 as a factor.
        // thus, instead we go from a prime to a prime of more or less double the size.
        size_type size = next_prime(d_data.size() + 1);
        Data copy(size, NULL);

        // move entries to new table
        for (size_type i = 0; i < d_data.size(); ++i) {
          // head of current bucket to move
          BucketNode* bucket = d_data[i];
          while (bucket != NULL) {
            // move head of old bucket
            BucketNode* current = bucket;
            bucket = bucket->d_next;

            // move old head to new bucket
            size_type new_index = hash(extractKey(current->d_value)) % size;
            BucketNode* new_bucket = copy[new_index];
            current->d_next = new_bucket;
            copy[new_index] = current;
          }
          d_data[i] = NULL;
        }

        d_data.swap(copy);
      }
    }


  public:
    /// constructors

    // default size is 16 buckets
    hash_table() :
      d_hash(_HashFcn()), d_equal(_EqualKey()), d_extractKey(_ExtractKey()),
      d_size(0), d_data(16)
    {
      init();
    };

    // specify initial number of buckets - must be positive
    hash_table(size_type initial_capacity) :
      d_hash(_HashFcn()), d_equal(_EqualKey()), d_extractKey(_ExtractKey()),
      d_size(0), d_data(initial_capacity)
    {
      init();
    };

    // specify initial number of buckets and hash function
    hash_table(size_type initial_capacity, const _HashFcn& hash) :
      d_hash(hash), d_equal(_EqualKey()), d_extractKey(_ExtractKey()),
      d_size(0), d_data(initial_capacity)
    {
      init();
    };

    // specify initial number of buckets, hash and equal function
    hash_table(size_type initial_capacity,
               const _HashFcn& hash, const _EqualKey& equal) :
      d_hash(hash), d_equal(equal), d_extractKey(_ExtractKey()),
      d_size(0), d_data(initial_capacity)
    {
      init();
    };

    // copy hash table
    hash_table(const hash_table& other) :
      d_hash(other.d_hash), d_equal(other.d_equal), d_extractKey(other.d_extractKey),
      d_size(other.d_size), d_data(0)
    {
      assignTable(other.d_data);
    };

    ~hash_table() {
      clear();
    }

    // assign hash table
    hash_table& operator=(const hash_table& other) {
      if (this != &other) {
        clear();

        d_hash = other.d_hash;
        d_equal = other.d_equal;
        d_extractKey = other.d_extractKey;
        d_size = other.d_size;
        assignTable(other.d_data);
      }

      return *this;
    }


    // replaces the current hash table with the given one
    void assignTable(const Data& data) {
      // copy elements:
      // the default assignment operator does not work if value_type contains
      // constants, which should be the case as the key should be constant.
      // so not even shrinking a vector is possible,
      // so create a new table instead and swap with the current one.
      Data tmp(data.size());
      d_data.swap(tmp);

      // for each bucket ...
      for (size_type i = 0; i < data.size(); ++i) {
        // .. copy each element
        DebugAssert(i < d_data.size(), "hash_table::operator=");

        // copy bucket if not empty
        Bucket* source = data[i];
        if (source != NULL) {
          // set first element
          Bucket* target = new BucketNode(NULL, source->d_value);
          d_data[i] = target;
          source = source->d_next;

          // copy remaining nodes
          while (source != NULL) {
            target->d_next = new BucketNode(NULL, source->d_value);
            target = target->d_next;
            source = source->d_next;
          }
        }
      }
    }

    void swap(hash_table& other) {
      std::swap(d_hash, other.d_hash);
      std::swap(d_equal, other.d_equal);
      std::swap(d_extractKey, other.d_extractKey);
      std::swap(d_size, other.d_size);
      d_data.swap(other.d_data);
    }

    // sets all buckets to NULL
    void init() {
      for (size_type i = 0; i < d_data.size(); ++i) {
        d_data[i] = NULL;
      }
    }

    // empty all buckets
    void clear() {
      d_size = 0;

      for (size_type i = 0; i < d_data.size(); ++i) {
        BucketNode* head = d_data[i];
        while (head != NULL) {
          BucketNode* next = head->d_next;
          delete head;
          head = next;
        }
        d_data[i] = NULL;
      }
    }



    /// operations


    // returns end iterator if key was not bound
    iterator find(const key_type& key) {
      for (BucketNode* node = getBucketByKey(key); node != NULL; node = node->d_next) {
        if (equal(extractKey(node->d_value), key)) {
          return iterator(this, node);
        }
      }
      return end();
    }

    // const version of find
    const_iterator find(const key_type& key) const {
      for (const BucketNode* node = getBucketByKey(key); node != NULL; node = node->d_next) {
        if (equal(extractKey(node->d_value), key)) {
          return const_iterator(this, node);
        }
      }
      return end();
    }


    // adds the mapping from key to data if key is still unbound;
    // otherwise returns false as the second component of the result
    std::pair<iterator, bool> insert(const value_type& value) {
      // resize in case we insert
      resize();

      const key_type& key = extractKey(value);
      size_type index = getBucketIndex(key);

      // check if key is already bound
      for (BucketNode* node = d_data[index]; node != NULL; node = node->d_next) {
        if (equal(extractKey(node->d_value), key)) {
          return std::make_pair(end(), false);
        }
      }

      // insert new value
      ++d_size;
      d_data[index] = new BucketNode(d_data[index], value);
      return std::make_pair(iterator(this, d_data[index]), true);
    }

    // if the key in value is already bound,
    // returns that binding,
    // otherwise inserts value and returns it.
    value_type& find_or_insert(const value_type& value) {
      // resize in case we insert
      resize();

      const key_type& key = extractKey(value);
      size_type index = getBucketIndex(key);

      // check if key is already bound
      for (BucketNode* node = d_data[index]; node != NULL; node = node->d_next) {
        if (equal(extractKey(node->d_value), key)) {
          return node->d_value;
        }
      }

      // insert new value
      ++d_size;
      d_data[index] = new BucketNode(d_data[index], value);
      return d_data[index]->d_value;
    }


    // removes the binding of key;
    // returns the number of keys removed,
    // i.e. 1 if key was bound, 0 if key was not bound.
    size_type erase(const key_type& key) {
      size_type index = getBucketIndex(key);

      // keep track of the node previous to the current one
      BucketNode* prev = NULL;
      for (BucketNode* node = d_data[index]; node != NULL; node = node->d_next) {
        if (equal(extractKey(node->d_value), key)) {
          --d_size;

          // remove the bucket's head
          if (prev == NULL) {
            d_data[index] = node->d_next;
          }
          // remove within the list
          else {
            prev->d_next = node->d_next;
          }
          delete node;
          return 1;
        }

        prev = node;
      }

      return 0;
    }

    // removes the element pointed to by iter,
    // returns the element after iter.
    const_iterator erase(const const_iterator& iter) {
      const_iterator next(iter);
      ++next;

      const key_type& key = extractKey(*iter);
      size_type index = getBucketIndex(key);

      // keep track of the node previous to the current one
      BucketNode* prev = NULL;
      for (BucketNode* node = d_data[index]; node != NULL; node = node->d_next) {
        if (equal(extractKey(node->d_value), key)) {
          --d_size;

          // remove the bucket's head
          if (prev == NULL) {
            d_data[index] = node->d_next;
          }
          // remove within the list
          else {
            prev->d_next = node->d_next;
          }
          delete node;
          return next;
        }

        prev = node;
      }

      return next;
    }


    /// status

    // is the key bound?
    bool contains(const key_type& key) const {
      return (find(key) != end());
    }

    // returns the number of times a key is bound,
    // i.e. 0 or 1
    size_type count(const _Key& key) const {
      if (contains(key)) {
        return 1;
      }
      else {
        return 0;
      }
    }

    // is the hash table empty?
    bool empty() const {
      return (d_size == 0);
    }

    // the number of elements in the hash table
    size_type size() const {
      return d_size;
    }

    // the number of buckets in the hash table
    size_type bucket_count() const {
      return d_data.size();
    }

    // returns the average number of elements per bucket
    float load_factor() const {
      return (float(d_size) / float(d_data.size()));
    }



    /// iterators

    // returns forward iterator to iterate over all key/data pairs
    iterator begin() {
      if (d_size > 0) {
        // find first non-empty bucket
        size_type index = 0;
        while (d_data[index] == NULL) {
          ++index;
        }

        return iterator(this, d_data[index]);
      }
      else {
        return end();
      }
    }

    // const version of begin
    const_iterator begin() const {
      if (d_size > 0) {
        // find first non-empty bucket
        size_type index = 0;
        while (d_data[index] == NULL) {
          ++index;
        }

        return const_iterator(this, d_data[index]);
      }
      else {
        return end();
      }
    }


    // returns end iterator
    iterator end() {
      return iterator(this, NULL);
    }

    // const version of end
    const_iterator end() const {
      return const_iterator(this, NULL);
    }




    /// inner classes




    // iterator over hashtable elements

    // modifying the hash table leaves the iterator intact,
    // unless the key in the value it points to is modified/deleted
    class iterator {
      friend class hash_table;
      friend class const_iterator;

      /// variables

    protected:
      // the hash table of this iterator
      hash_table* d_hash_table;
      // iterator points to current element in some bucket
      BucketNode* d_node;


      /// methods
    protected:
      // used by hash_table to create an iterator
      iterator(hash_table* hash_table, BucketNode* node)
        : d_hash_table(hash_table), d_node(node)
      { }

    public:
      // public default constructor,
      // leaves the iterator in an undefined state.
      iterator()
        : d_hash_table(NULL), d_node(NULL)
      { }

      // copy constructor
      iterator(const iterator& other)
        : d_hash_table(other.d_hash_table), d_node(other.d_node)
      { }

      // assignment
      iterator& operator=(const iterator& other) {
        if (this != &other) {
          d_hash_table = other.d_hash_table;
          d_node = other.d_node;
        }

        return *this;
      }

      // go to next data - pre-increment
      iterator& operator++() {
        // must not be the end iterator
        DebugAssert(d_node != NULL, "hash operator++");

        // get current bucket index
        size_type index = d_hash_table->getBucketIndex(d_hash_table->extractKey(d_node->d_value));

        // go to next entry in bucket
        d_node = d_node->d_next;

        // while current bucket empty
        while (d_node == NULL) {
          // go to next bucket
          ++index;

          // all buckets exhausted
          if (index == d_hash_table->d_data.size()) {
            // unfortunately this does not work, as end() returns a tmp value
            // return d_hash_table->end();
            *this = d_hash_table->end();
            return *this;
          }
          DebugAssert(index < d_hash_table->d_data.size(),
                      "hash operator++ 2");

          d_node = d_hash_table->getBucketByIndex(index);
        }

        return *this;
      };

      // go to next data - post-increment
      iterator operator++(int) {
        iterator tmp = *this;
        ++(*this);
        return tmp;
      }

      value_type& operator*() const {
        return d_node->d_value;
      }

      value_type* operator->() const {
        return &(operator*());
      }

      // are two iterators identical?
      bool operator==(const iterator& other) const {
        // if it is the same bucket iterator, then it must be the same hash table
        DebugAssert(d_node == NULL || d_node != other.d_node || d_hash_table == other.d_hash_table, "hash operator==");
        return (d_node == other.d_node);
      }

      // negation of ==
      bool operator!=(const iterator& other) const {
        return !(*this == other);
      }
    };




    // const iterator over hashtable elements

    // modifying the hash table leaves the iterator intact,
    // unless the key in the value it points to is modified/deleted
    class const_iterator {
      friend class hash_table;

      /// variables

    protected:
      // the hash table of this iterator
      const hash_table* d_hash_table;
      // iterator points to current element in some bucket
      const BucketNode* d_node;


      /// methods
    protected:
      // used by hash_table to create an iterator
      const_iterator(hash_table const* hash_table, const BucketNode* node)
        : d_hash_table(hash_table), d_node(node)
      { }

    public:
      // public default constructor,
      // leaves the iterator in an undefined state.
      const_iterator()
        : d_hash_table(NULL), d_node(NULL)
      { }

      // copy constructor
      const_iterator(const const_iterator& other)
        : d_hash_table(other.d_hash_table), d_node(other.d_node)
      { }

      // conversion constructor from non-const iterator
      const_iterator(const iterator& other)
        : d_hash_table(other.d_hash_table), d_node(other.d_node)
      { }

      // assignment
      const_iterator& operator=(const const_iterator& other) {
        if (this != &other) {
          d_hash_table = other.d_hash_table;
          d_node = other.d_node;
        }

        return *this;
      }

      // go to next data - pre-increment
      const_iterator& operator++() {
        // must not be the end iterator
        DebugAssert(d_node != NULL, "");

        // get current bucket index
        size_type index = d_hash_table->getBucketIndex(d_hash_table->extractKey(d_node->d_value));

        // go to next entry in bucket
        d_node = d_node->d_next;

        // while current bucket empty
        while (d_node == NULL) {
          // go to next bucket
          ++index;

          // all buckets exhausted
          if (index == d_hash_table->d_data.size()) {
            // unfortunately this does not work, as end() returns a tmp value
            // return d_hash_table->end();
            *this = d_hash_table->end();
            return *this;
          }
          DebugAssert(index < d_hash_table->d_data.size(), "");

          d_node = d_hash_table->getBucketByIndex(index);
        }

        return *this;
      };

      // go to next data - post-increment
      const_iterator operator++(int) {
        const_iterator tmp = *this;
        ++(*this);
        return tmp;
      }

      const value_type& operator*() const {
        return d_node->d_value;
      }

      const value_type* operator->() const {
        return &(operator*());
      }

      // are two iterators identical?
      bool operator==(const const_iterator& other) const {
        // if it is the same bucket iterator, then it must be the same hash table
        DebugAssert(d_node == NULL || d_node != other.d_node || d_hash_table == other.d_hash_table, "");
        return (d_node == other.d_node);
      }

      // negation of ==
      bool operator!=(const const_iterator& other) const {
        return !(*this == other);
      }
    };
  };

}

#endif
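
To show how the five template parameters fit together, here is a minimal usage sketch, not part of CVC3: the table is instantiated as a map from std::string to int, roughly the way a hash_map wrapper over this class would do it. The Entry typedef and the functors StringHash, StringEqual and ExtractFirst are hypothetical helpers written only for this illustration; CVC3's own hash functions live in hash_fun.h.

#include "hash_table.h"
#include <iostream>
#include <string>
#include <utility>

// hypothetical hash functor for std::string keys
struct StringHash {
  Hash::size_type operator()(const std::string& s) const {
    Hash::size_type h = 0;
    for (std::string::size_type i = 0; i < s.size(); ++i)
      h = 5 * h + s[i];           // simple multiplicative string hash
    return h;
  }
};

// key equality
struct StringEqual {
  bool operator()(const std::string& a, const std::string& b) const {
    return a == b;
  }
};

// stored values are (key, data) pairs; the key is the first component
typedef std::pair<const std::string, int> Entry;
struct ExtractFirst {
  const std::string& operator()(const Entry& e) const { return e.first; }
};

typedef Hash::hash_table<std::string, Entry,
                         StringHash, StringEqual, ExtractFirst> StringIntMap;

int main() {
  StringIntMap table;                        // 16 buckets by default

  table.insert(Entry("one", 1));             // new binding: (iterator, true)
  table.insert(Entry("one", 42));            // key already bound: (end(), false)
  table.find_or_insert(Entry("two", 2)).second = 22;  // returns binding by reference

  StringIntMap::iterator it = table.find("one");
  if (it != table.end())
    std::cout << it->first << " -> " << it->second << std::endl;

  std::cout << "size=" << table.size()
            << " buckets=" << table.bucket_count()
            << " load=" << table.load_factor() << std::endl;

  table.erase("one");                        // returns 1, since the key was bound
  return 0;
}

A set instantiation follows the same pattern with _Value equal to _Key and an extract functor that returns its argument unchanged; insert() then calls resize() as needed to keep load_factor() at or below 1.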