C++實現LRU緩存算法
LRU是Least Recently Used的縮寫,意思是最近最少使用,它是一種Cache替換算法。
實現思路: hashtable + 雙向鏈表
時間復雜度: 插入,查找,刪除:O(1)
空間使用情況: O(N) :一個鏈表存儲N個數據(注意 hash_map 並非標準 STL 容器,而是 __gnu_cxx 的非標準擴展,實際占的空間比較大).
運行環境:
linux:redhat , fedora ,centos等(理論上ubuntu , debian,mac os等也可以運行)
代碼:
#ifndef __LRUCACHE_H__
#define __LRUCACHE_H__

#include <vector>
#include <unordered_map> // replaces the deprecated, non-standard <ext/hash_map>
#include <pthread.h>
#include <cstddef>

// Node of the intrusive doubly-linked list that records recency order.
template <class K, class D>
struct Node {
    K key;
    D data;
    Node *prev, *next;
};

// Fixed-capacity LRU cache: O(1) Put/Get via hash map + doubly-linked list.
// The list runs most- to least-recently-used between the `head` and `tail`
// sentinels; eviction recycles tail->prev. All nodes are pre-allocated in
// one pool, so steady-state operation performs no allocations.
// Thread safety is optional, guarded by a pthread mutex.
template <class K, class D>
class LRUCache {
public:
    // size: maximum number of entries (0 falls back to 1024).
    // is_pthread_safe: when true, Put/Get are serialized by a mutex.
    explicit LRUCache(size_t size, bool is_pthread_safe = false) {
        // size_t is unsigned, so the original `size <= 0` could only ever
        // match 0 — test for 0 explicitly.
        if (size == 0)
            size = 1024;
        pthread_safe = is_pthread_safe;
        if (pthread_safe)
            pthread_mutex_init(&cached_mutex, NULL);

        // Pre-allocate the node pool; free nodes are handed out from here.
        entries = new Node<K, D>[size];
        for (size_t i = 0; i < size; ++i)
            cached_entries.push_back(entries + i);

        // Sentinel head/tail simplify detach/attach (no NULL checks needed).
        head = new Node<K, D>;
        tail = new Node<K, D>;
        head->prev = NULL;
        head->next = tail;
        tail->prev = head;
        tail->next = NULL;
    }

    ~LRUCache() {
        if (pthread_safe)
            pthread_mutex_destroy(&cached_mutex);
        delete head;
        delete tail;
        delete[] entries;
    }

    // The cache owns raw resources (node pool, sentinels, mutex):
    // copying would double-free, so forbid it (rule of three).
    LRUCache(const LRUCache&) = delete;
    LRUCache& operator=(const LRUCache&) = delete;

    // Insert or update key -> data and mark it most recently used.
    // When the cache is full, the least recently used entry is evicted.
    void Put(K key, D data) {
        cached_lock();
        // find() instead of operator[]: operator[] would default-insert a
        // NULL mapping for every new key before we decide what to do with it.
        typename std::unordered_map<K, Node<K, D>*>::iterator it =
            cached_map.find(key);
        if (it != cached_map.end()) {
            // Existing key: refresh the value and move to the MRU position.
            Node<K, D>* node = it->second;
            detach(node);
            node->data = data;
            attach(node);
        } else {
            Node<K, D>* node;
            if (cached_entries.empty()) {
                // Cache full: recycle the least recently used node.
                node = tail->prev;
                detach(node);
                cached_map.erase(node->key);
            } else {
                node = cached_entries.back();
                cached_entries.pop_back();
            }
            node->key = key;
            node->data = data;
            cached_map[key] = node;
            attach(node);
        }
        cached_unlock();
    }

    // Return the value stored under key (marking it most recently used),
    // or a default-constructed D() on a miss.
    // NOTE: callers cannot distinguish a miss from a stored D() value —
    // a limitation of this interface, kept for compatibility.
    D Get(K key) {
        cached_lock();
        // The original used cached_map[key], which silently inserted a NULL
        // node for every missed key, growing the map without bound.
        typename std::unordered_map<K, Node<K, D>*>::iterator it =
            cached_map.find(key);
        if (it != cached_map.end()) {
            Node<K, D>* node = it->second;
            detach(node);
            attach(node);
            // Copy the value while still holding the lock; the original
            // read node->data after unlocking, racing concurrent Puts.
            D data = node->data;
            cached_unlock();
            return data;
        }
        cached_unlock();
        return D();
    }

private:
    void cached_lock(void) {
        if (pthread_safe)
            pthread_mutex_lock(&cached_mutex);
    }
    void cached_unlock(void) {
        if (pthread_safe)
            pthread_mutex_unlock(&cached_mutex);
    }

    // Unlink node from the recency list (sentinels make this branch-free).
    void detach(Node<K, D>* node) {
        node->prev->next = node->next;
        node->next->prev = node->prev;
    }

    // Insert node right after head, i.e. at the most-recently-used position.
    void attach(Node<K, D>* node) {
        node->prev = head;
        node->next = head->next;
        head->next = node;
        node->next->prev = node;
    }

private:
    std::unordered_map<K, Node<K, D>*> cached_map; // key -> live node
    std::vector<Node<K, D>*> cached_entries;       // pool of free nodes
    Node<K, D> *head, *tail;                       // list sentinels
    Node<K, D>* entries;                           // backing array for all nodes
    bool pthread_safe;
    pthread_mutex_t cached_mutex;
};

#endif // __LRUCACHE_H__
// Test cases:
/* Compile: g++ -o app app.cpp LRUCache.cpp -lpthread Run: ./app */ #include <iostream> #include <string> #include "LRUCache.h" using namespace std; int main(void){ //int k = 10 , // max = 100; int k = 100000 , max = 1000000; LRUCache<int , int> * lru_cache = new LRUCache<int , int>(k , true); int tmp = 0; for(int i = 0 ; i < 2*k ; ++i){ tmp = rand() % max; lru_cache->Put(tmp, tmp + 1000000); cout<<tmp<<endl; } for(int i = 0 ; i < k ; ++i){ tmp = rand() % max; if(lru_cache->Get(tmp) == 0) cout<<"miss : "<<tmp<<endl; else cout<<"hit : "<<tmp<<" value : "<<lru_cache->Get(tmp)<<endl; } delete lru_cache; return 0; }
其實,上面的代碼,有一些毛病的。改天我會繼續改進。
例如:
1:冗余操作。cached_entries完全可以用一個counter代替。
2:過度抽象。
3:Get、Put的interface不合理。如果真的去實現一個磁盤block的LRU cache,就會發現之前的接口需要重寫了。
不過對于大家理解LRU算法。應該有一定的幫助的。
本文由用戶 jopen 自行上傳分享,僅供網友學習交流。所有權歸原作者,若您的權利被侵害,請聯系管理員。
轉載本站原創文章,請注明出處,并保留原始鏈接、圖片水印。
本站是一個以用戶分享為主的開源技術平臺,歡迎各類分享!