Writing Your Own LocalCache: LRU Based on LinkedHashMap

Functional Goals

     Implement a process-wide LocalCache in which each business module uses its own namespace to logically partition the cache. Reads and writes therefore use keys of the form (namespace + separator + data key); for example, the key-value pair NameToAge,Troy -> 23. The LocalCache must be thread-safe, the total number of key-value pairs it holds must be bounded, and it must offer operations such as clearing, resizing, and dumping its contents to a local file.
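
     As a minimal sketch of this key scheme (the CacheKeys helper and the comma separator below are illustrative assumptions, not part of the cache implementation):

public final class CacheKeys {

    /** Separator between namespace and data key; the comma is only an example. */
    private static final String SEPARATOR = ",";

    private CacheKeys() {}

    /** Builds the flat cache key in the form namespace + separator + data key. */
    public static String buildKey(String namespace, String dataKey) {
        return namespace + SEPARATOR + dataKey;
    }

    public static void main(String[] args) {
        System.out.println(buildKey("NameToAge", "Troy")); // prints "NameToAge,Troy"
    }
}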

Implementing an LRU Map with LinkedHashMap

     LinkedHashMap stores key-value pairs and, when constructed in access-order mode, can be used to model the LRU algorithm. In short, whenever an existing entry is accessed or a new entry is inserted, LinkedHashMap moves that entry to the tail of its internal linked list, so the entry at the head of the list is the least recently used one, which is exactly the entry LRU evicts first. Because this ordering is maintained by a doubly linked list layered over a hash table, moving and inserting entries are cheap constant-time operations. We will build a thread-safe LRU Map on top of a single LinkedHashMap.
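
     A quick stand-alone demonstration of this access-order behavior (a sketch independent of the LRUMap below; the capacity of two is chosen purely for illustration):

import java.util.LinkedHashMap;
import java.util.Map;

public class AccessOrderDemo {
    public static void main(String[] args) {
        // accessOrder = true: iteration order runs from least to most recently used
        Map<String, Integer> lru = new LinkedHashMap<String, Integer>(16, 0.75f, true) {
            @Override
            protected boolean removeEldestEntry(Map.Entry<String, Integer> eldest) {
                return size() > 2; // keep at most two entries
            }
        };
        lru.put("a", 1);
        lru.put("b", 2);
        lru.get("a");                     // "a" becomes the most recently used entry
        lru.put("c", 3);                  // evicts "b", the least recently used entry
        System.out.println(lru.keySet()); // prints [a, c]
    }
}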

The LRU Map Implementation

import java.io.Externalizable;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.lang.ref.SoftReference;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

public class LRUMap<T> extends LinkedHashMap<String, SoftReference<T>> implements Externalizable {

    private static final long serialVersionUID = -7076355612133906912L;

    /** The maximum size of the cache. */
    private int maxCacheSize;

    /* lock for map */
    private final Lock lock = new ReentrantLock();

    /**
     * Default constructor (required by Externalizable); the resulting map is
     * effectively unbounded, with maxCacheSize set to Integer.MAX_VALUE.
     */
    public LRUMap() {
        super();
        maxCacheSize = Integer.MAX_VALUE;
    }

    /**
     * Constructs a new, empty cache with the specified maximum size.
     */
    public LRUMap(int size) {
        super(size + 1, 1f, true);
        maxCacheSize = size;
    }

    /**
     * Makes LinkedHashMap behave as an LRU cache: returns true (evict the eldest
     * entry) once the map has grown beyond the configured maximum size. The
     * default LinkedHashMap implementation always returns false, i.e. never
     * evicts anything.
     */
    @Override
    protected boolean removeEldestEntry(Map.Entry<String, SoftReference<T>> eldest) {
        return size() > maxCacheSize;
    }

    public T addEntry(String key, T entry) {
        SoftReference<T> sr_entry = new SoftReference<T>(entry);
        // acquire the lock outside the try block so that unlock() in finally
        // only runs if the lock was actually obtained
        lock.lock();
        try {
            // add entry to hashmap
            put(key, sr_entry);
        }
        finally {
            lock.unlock();
        }
        return entry;
    }

    public T getEntry(String key) {
        lock.lock();
        try {
            SoftReference<T> sr_entry = get(key);
            if (sr_entry == null)
                return null;
            // if the referent is null then the entry has been garbage collected
            // and so the key should be removed as well
            T value = sr_entry.get();
            if (value == null) {
                remove(key);
                return null;
            }
            return value;
        }
        finally {
            lock.unlock();
        }
    }

    @Override
    public SoftReference<T> remove(Object key) {
        lock.lock();
        try {
            return super.remove(key);
        }
        finally {
            lock.unlock();
        }
    }

    @Override
    public void clear() {
        // use the same lock as the other accessors; synchronized(this) would not
        // exclude threads that only hold the ReentrantLock
        lock.lock();
        try {
            super.clear();
        }
        finally {
            lock.unlock();
        }
    }

    public void writeExternal(ObjectOutput out) throws IOException {
        // Snapshot the entries whose soft references are still live, so that the
        // count written below matches the number of key/value pairs actually written
        Map<String, T> live = new LinkedHashMap<String, T>();
        for (Map.Entry<String, SoftReference<T>> e : entrySet()) {
            T value = (e.getValue() == null) ? null : e.getValue().get();
            if (value != null) {
                live.put(e.getKey(), value);
            }
        }
        // Write out size
        out.writeInt(live.size());
        // Write out keys and values
        for (Map.Entry<String, T> e : live.entrySet()) {
            out.writeObject(e.getKey());
            out.writeObject(e.getValue());
        }
    }

    public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
        // Read in size
        int size = in.readInt();
        // Read the keys and values, and put the mappings in the Map
        for (int i = 0; i < size; i++) {
            String key = (String) in.readObject();
            @SuppressWarnings("unchecked")
            T value = (T) in.readObject();
            addEntry(key, value);
        }
    }

}
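
A short usage sketch of the LRUMap above, using plain String values instead of the CacheObject type introduced later:

public class LRUMapDemo {
    public static void main(String[] args) {
        LRUMap<String> map = new LRUMap<String>(2); // hold at most two entries

        map.addEntry("NameToAge,Troy", "23");
        map.addEntry("NameToAge,Anna", "25");
        map.getEntry("NameToAge,Troy");      // Troy becomes the most recently used
        map.addEntry("NameToAge,Bob", "30"); // evicts Anna, the least recently used

        System.out.println(map.getEntry("NameToAge,Anna")); // null, already evicted
        System.out.println(map.getEntry("NameToAge,Troy")); // "23", unless the
                                                            // SoftReference was collected
    }
}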
   

LocalCache Design

     Using only one LRU Map inside LocalCache would cause performance problems: 1. a single LinkedHashMap would hold too many entries; 2. under high concurrency, a single lock would serialize all reads and writes.
     LocalCache therefore uses several LRU Maps and hashes each key to one of them, which both speeds up lookups within any single LinkedHashMap and raises the overall level of concurrency.

LocalCache Implementation

     The hash function chosen here is a variant of the Wang/Jenkins hash, and the way keys are mapped to segments follows the implementation of ConcurrentHashMap.

import java.io.File;
import java.lang.ref.SoftReference;
import java.util.Iterator;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

// StringUtils (Apache Commons Lang) and the ObjectUtils file-serialization helper
// used below are assumed to be provided elsewhere on the classpath.
public class LocalCache {

    /**
     * Requested total capacity of the cache, distributed across the segments.
     */
    private final int size;

    /**
     * Maximum capacity of the local cache.
     */
    static final int MAXIMUM_CAPACITY = 1 << 30;

    /**
     * Maximum number of segments the local cache supports.
     */
    static final int MAX_SEGMENTS = 1 << 16; // slightly conservative

    /**
     * Array of LRUMap segments backing the local cache.
     */
    LRUMap<CacheObject>[] segments;

    /**
     * Mask value for indexing into segments. The upper bits of a key's hash
     * code are used to choose the segment.
     */
    int segmentMask;

    /**
     * Shift value for indexing within segments.
     */
    int segmentShift;

    /**
     * Threshold at which the lookup/hit counters are reset.
     */
    private static final int MAX_LOOKUP = 100000000;

    /**
     * Lock used when resetting the counters, preventing them from being reset
     * more than once at a time.
     */
    private final Lock lock = new ReentrantLock();

    /**
     * Number of requests made to lookup a cache entry.
     */
    private AtomicLong lookup = new AtomicLong(0);

    /**
     * Number of successful requests for cache entries.
     */
    private AtomicLong found = new AtomicLong(0);
    
    public LocalCache(int size) {
        this.size = size;
    }

    public CacheObject get(String key) {
        if (StringUtils.isBlank(key)) {
            return null;
        }
        // increment the lookup counter
        lookup.incrementAndGet();

        // reset the counters once the threshold has been exceeded
        if (lookup.get() > MAX_LOOKUP) {
            if (lock.tryLock()) {
                try {
                    lookup.set(0);
                    found.set(0);
                }
                finally {
                    lock.unlock();
                }
            }
        }

        int hash = hash(key.hashCode());
        CacheObject ret = segmentFor(hash).getEntry(key);
        if (ret != null)
            found.incrementAndGet();
        return ret;
    }


    public void remove(String key) {
        if (StringUtils.isBlank(key)) {
            return;
        }
        int hash = hash(key.hashCode());
        segmentFor(hash).remove(key);
        return;
    }

    public void put(String key, CacheObject val) {
        if (StringUtils.isBlank(key) || val == null) {
            return;
        }
        int hash = hash(key.hashCode());
        segmentFor(hash).addEntry(key, val);
        return;
    }

    public synchronized void clearCache() {
        for (int i = 0; i < segments.length; ++i)
            segments[i].clear();
    }

    public synchronized void reload() throws Exception {
       clearCache();
       init();
    }

    public synchronized void dumpLocalCache() throws Exception {
        for (int i = 0; i < segments.length; ++i) {
            String tmpDir = System.getProperty("java.io.tmpdir");
            String fileName = tmpDir + File.separator + "localCache-dump-file" + i + ".cache";
            File file = new File(fileName);
            ObjectUtils.objectToFile(segments[i], file);
        }
    }

    @SuppressWarnings("unchecked")
    public synchronized void restoreLocalCache() throws Exception {
        for (int i = 0; i < segments.length; ++i) {
            String tmpDir = System.getProperty("java.io.tmpdir");
            String fileName = tmpDir + File.separator + "localCache-dump-file" + i + ".cache";
            File file = new File(fileName);
            LRUMap<CacheObject> lruMap = (LRUMap<CacheObject>) ObjectUtils.fileToObject(file);
            if (lruMap != null) {
                Set<Entry<String, SoftReference<CacheObject>>> set = lruMap.entrySet();
                Iterator<Entry<String, SoftReference<CacheObject>>> it = set.iterator();
                while (it.hasNext()) {
                    Entry<String, SoftReference<CacheObject>> entry = it.next();
                    if (entry.getValue() != null && entry.getValue().get() != null)
                        segments[i].addEntry(entry.getKey(), entry.getValue().get());
                }
            }
        }
    }


    /**
     * Hit rate of the local cache as a percentage; it may read 0 right after the
     * counters have been reset.
     */
    public int getHitRate() {
        long query = lookup.get();
        return query == 0 ? 0 : (int) ((found.get() * 100) / query);
    }

    /**
     * Number of lookups made against the local cache; it may read 0 right after
     * the counters have been reset.
     */
    public long getCount() {
        return lookup.get();
    }

    public int size() {
        final LRUMap<CacheObject>[] segments = this.segments;
        long sum = 0;
        for (int i = 0; i < segments.length; ++i) {
            sum += segments[i].size();
        }
        if (sum > Integer.MAX_VALUE)
            return Integer.MAX_VALUE;
        else
            return (int) sum;
    }


    /**
     * Returns the segment that should be used for key with given hash
     * 
     * @param hash
     *            the hash code for the key
     * @return the segment
     */
    final LRUMap<CacheObject> segmentFor(int hash) {
        return segments[(hash >>> segmentShift) & segmentMask];
    }


    /* ---------------- Small Utilities -------------- */

    /**
     * Applies a supplemental hash function to a given hashCode, which defends
     * against poor quality hash functions. This matters because the cache uses a
     * power-of-two number of segments and would otherwise see collisions for
     * hashCodes that do not differ in their upper bits.
     */
    private static int hash(int h) {
        // Spread bits to regularize both segment and index locations,
        // using variant of single-word Wang/Jenkins hash.
        h += (h << 15) ^ 0xffffcd7d;
        h ^= (h >>> 10);
        h += (h << 3);
        h ^= (h >>> 6);
        h += (h << 2) + (h << 14);
        return h ^ (h >>> 16);
    }

    @SuppressWarnings("unchecked")
    public void init() throws Exception {
        int concurrencyLevel = 16;
        int capacity = size;
        if (capacity < 0 || concurrencyLevel <= 0)
            throw new IllegalArgumentException();
        if (concurrencyLevel > MAX_SEGMENTS)
            concurrencyLevel = MAX_SEGMENTS;
        // Find power-of-two sizes best matching arguments
        int sshift = 0;
        int ssize = 1;
        while (ssize < concurrencyLevel) {
            ++sshift;
            ssize <<= 1;
        }
        segmentShift = 32 - sshift;
        segmentMask = ssize - 1;
        this.segments = new LRUMap[ssize];
        if (capacity > MAXIMUM_CAPACITY)
            capacity = MAXIMUM_CAPACITY;
        int c = capacity / ssize;
        if (c * ssize < capacity)
            ++c;
        // LRUMap does not require a power-of-two capacity, so give each segment
        // its share of the requested total directly
        for (int i = 0; i < this.segments.length; ++i)
            this.segments[i] = new LRUMap<CacheObject>(c);
    }
}
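
Finally, a hedged usage sketch of the LocalCache as a whole; it assumes a CacheObject type with a simple String constructor, which is not shown in the code above:

public class LocalCacheDemo {
    public static void main(String[] args) throws Exception {
        LocalCache cache = new LocalCache(10000); // requested total capacity
        cache.init();                             // allocate the LRUMap segments

        // keys follow the "namespace + separator + data key" convention
        cache.put("NameToAge,Troy", new CacheObject("23")); // assumed constructor

        CacheObject hit = cache.get("NameToAge,Troy");
        CacheObject miss = cache.get("NameToAge,Unknown");

        System.out.println(hit != null);        // true
        System.out.println(miss == null);       // true
        System.out.println(cache.getHitRate()); // 50 -> one hit out of two lookups
        System.out.println(cache.size());       // 1

        cache.dumpLocalCache();                 // writes one dump file per segment
    }
}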

