Problem Background
In high-performance system architectures, the choice and implementation of caching strategies directly impact system performance. Traditional single LRU implementations often lack extensibility, requiring complete rewrites when business requirements change (such as supporting LFU, FIFO, etc.).
OAVOService Interview Support Team has found through years of coaching system design interviews at Google, Amazon, Meta, and other big tech companies that extensible cache design is a classic topic for evaluating candidates' object-oriented design capabilities and architectural thinking.
Problem Description
Problem 2 — Design an Extensible LRU Cache
Design an extensible LRU cache class structure and implementation. Focus on extensibility, as more eviction policies like LFU (Least Frequently Used) will be added later.
Core Requirements:
- Implement basic LRU cache functionality (O(1) get/put operations)
- Design extensible architecture supporting multiple eviction strategies
- Consider future addition of LFU, FIFO, Random strategies
- Good interface abstraction and code reusability
Architecture Design
1. Strategy Pattern + Factory Pattern
from abc import ABC, abstractmethod
from typing import Any, Optional
import threading
class EvictionStrategy(ABC):
    """Interface that every eviction policy must implement.

    ``ExtensibleCache`` drives a policy exclusively through these five
    hooks, so adding a new policy (LFU, FIFO, random, ...) only requires
    another subclass — the cache core stays untouched.
    """

    @abstractmethod
    def on_access(self, key: Any) -> None:
        """Notify the policy that *key* was read from the cache."""

    @abstractmethod
    def on_insert(self, key: Any, value: Any) -> None:
        """Notify the policy that a brand-new *key*/*value* pair was stored."""

    @abstractmethod
    def evict(self) -> Any:
        """Pick a victim, drop it from internal bookkeeping, and return its key."""

    @abstractmethod
    def remove(self, key: Any) -> None:
        """Forget *key* entirely."""

    @abstractmethod
    def size(self) -> int:
        """Number of keys currently tracked by the policy."""
class CacheNode:
    """Doubly linked list node shared by the eviction strategies.

    Carries optional per-strategy metadata (``frequency`` for LFU,
    ``timestamp`` reserved for time-aware policies) so a single node
    type can serve every strategy.
    """

    def __init__(self, key=None, value=None):
        self.key = key          # cache key (None for sentinel nodes)
        self.value = value      # cached value
        self.prev = None        # previous node in the linked list
        self.next = None        # next node in the linked list
        self.frequency = 1      # access count, used by the LFU strategy
        self.timestamp = 0      # reserved for time-based strategies

    def __repr__(self) -> str:
        # Debug aid; prev/next links are deliberately omitted so repr
        # never walks (or recurses through) the list.
        return f"{type(self).__name__}(key={self.key!r}, value={self.value!r})"
2. LRU Strategy Implementation
import time
class LRUStrategy(EvictionStrategy):
    """Least-recently-used policy backed by a sentinel doubly linked list.

    The most recent entry sits directly behind ``head``; the eviction
    candidate is always the node just before ``tail``.  Every operation
    is O(1) thanks to the ``key_to_node`` hash index.
    """

    def __init__(self):
        # Sentinel nodes remove all edge cases from link/unlink code.
        self.head = CacheNode()
        self.tail = CacheNode()
        self.head.next = self.tail
        self.tail.prev = self.head
        self.key_to_node = {}

    def _add_to_head(self, node: CacheNode) -> None:
        """Splice *node* in directly after the head sentinel (MRU slot)."""
        first = self.head.next
        node.prev, node.next = self.head, first
        first.prev = node
        self.head.next = node

    def _remove_node(self, node: CacheNode) -> None:
        """Unlink *node* from wherever it sits in the list."""
        before, after = node.prev, node.next
        before.next = after
        after.prev = before

    def _move_to_head(self, node: CacheNode) -> None:
        """Promote *node* to the MRU position."""
        self._remove_node(node)
        self._add_to_head(node)

    def on_access(self, key: Any) -> None:
        """A hit promotes the entry to most-recently-used."""
        entry = self.key_to_node.get(key)
        if entry is not None:
            self._move_to_head(entry)

    def on_insert(self, key: Any, value: Any) -> None:
        """New entries start life at the MRU position."""
        entry = CacheNode(key, value)
        self.key_to_node[key] = entry
        self._add_to_head(entry)

    def evict(self) -> Any:
        """Drop and return the LRU key, or None when nothing is cached."""
        victim = self.tail.prev
        if victim == self.head:
            return None
        self._remove_node(victim)
        del self.key_to_node[victim.key]
        return victim.key

    def remove(self, key: Any) -> None:
        """Forget *key* if present; unknown keys are silently ignored."""
        entry = self.key_to_node.pop(key, None)
        if entry is not None:
            self._remove_node(entry)

    def size(self) -> int:
        return len(self.key_to_node)

    def get_node(self, key: Any) -> Optional[CacheNode]:
        """Direct node lookup used by ExtensibleCache for value access."""
        return self.key_to_node.get(key)
3. Core Cache Class
class ExtensibleCache:
    """Strategy-agnostic cache core.

    All bookkeeping (recency, frequency, ...) is delegated to the
    injected eviction strategy; this class only enforces the capacity
    bound and thread safety.  Strategies exposing a ``get_node``
    accessor (both built-in ones do) get O(1) reads; without it every
    lookup misses, because values live inside the strategy's nodes.
    """

    def __init__(self, capacity: int, strategy: "EvictionStrategy"):
        # Fail fast: with capacity <= 0 the old code deferred the error
        # to the first put(), surfacing it as a confusing RuntimeError.
        if capacity <= 0:
            raise ValueError(f"capacity must be positive, got {capacity}")
        self.capacity = capacity
        self.strategy = strategy
        self._lock = threading.RLock()  # re-entrant so methods may nest safely

    def _lookup(self, key: Any):
        """Fetch the strategy's node for *key*, or None (duck-typed)."""
        getter = getattr(self.strategy, "get_node", None)
        return getter(key) if getter is not None else None

    def get(self, key: Any) -> Any:
        """Return the cached value for *key*, or None on a miss.  O(1)."""
        with self._lock:
            node = self._lookup(key)
            if node is None:
                return None
            self.strategy.on_access(key)
            return node.value

    def put(self, key: Any, value: Any) -> None:
        """Insert or update *key*, evicting one entry if at capacity.  O(1)."""
        with self._lock:
            node = self._lookup(key)
            if node is not None:
                # In-place update counts as an access for the policy.
                node.value = value
                self.strategy.on_access(key)
                return
            if self.strategy.size() >= self.capacity:
                if self.strategy.evict() is None:
                    raise RuntimeError("Failed to evict item from full cache")
            self.strategy.on_insert(key, value)

    def remove(self, key: Any) -> bool:
        """Remove *key*; return True if it was actually present."""
        with self._lock:
            if self._lookup(key) is None:
                return False
            self.strategy.remove(key)
            return True

    def size(self) -> int:
        """Current number of cached entries.

        BUGFIX: previously read strategy state without taking the lock,
        racing concurrent put/remove calls.
        """
        with self._lock:
            return self.strategy.size()

    def clear(self) -> None:
        """Drop every entry by rebuilding the strategy from scratch.

        NOTE(review): assumes the strategy type has a no-argument
        constructor — true for LRUStrategy/LFUStrategy; verify for any
        future strategy before relying on clear().
        """
        with self._lock:
            self.strategy = type(self.strategy)()
Extended Strategy Implementations
1. LFU Strategy Extension
from collections import defaultdict
import heapq
class LFUStrategy(EvictionStrategy):
    """Least-frequently-used policy.

    ``freq_to_keys`` buckets keys by access count and ``min_frequency``
    caches the lowest non-empty bucket, making eviction O(1) in the
    common case.  Ties within a bucket are broken arbitrarily by
    ``set.pop()`` — a production LFU would fall back to LRU order.
    """

    def __init__(self):
        self.key_to_node = {}                  # key -> CacheNode (holds the value)
        self.freq_to_keys = defaultdict(set)   # access count -> keys at that count
        self.min_frequency = 0                 # hint: lowest count with a live key
        self.key_to_freq = {}                  # key -> its current access count

    def _update_frequency(self, key: Any) -> None:
        """Move *key* from its current frequency bucket up to the next."""
        freq = self.key_to_freq.get(key, 0)
        if freq > 0:
            self.freq_to_keys[freq].discard(key)
            # If the minimum bucket just drained, the new minimum is freq+1.
            if freq == self.min_frequency and not self.freq_to_keys[freq]:
                self.min_frequency += 1
        bumped = freq + 1
        self.key_to_freq[key] = bumped
        self.freq_to_keys[bumped].add(key)

    def on_access(self, key: Any) -> None:
        if key in self.key_to_node:
            self._update_frequency(key)

    def on_insert(self, key: Any, value: Any) -> None:
        node = CacheNode(key, value)
        self.key_to_node[key] = node
        self.key_to_freq[key] = 1
        self.freq_to_keys[1].add(key)
        self.min_frequency = 1  # a fresh key always starts at frequency 1

    def evict(self) -> Any:
        """Drop and return a least-frequently-used key (None if empty)."""
        if not self.key_to_node:
            return None
        # BUGFIX: remove() and back-to-back evictions can leave
        # min_frequency pointing at an empty bucket; the old code then
        # called set.pop() on an empty set and raised KeyError.
        # Re-derive the true minimum over non-empty buckets on demand.
        if not self.freq_to_keys[self.min_frequency]:
            self.min_frequency = min(
                freq for freq, keys in self.freq_to_keys.items() if keys
            )
        evict_key = self.freq_to_keys[self.min_frequency].pop()
        del self.key_to_node[evict_key]
        del self.key_to_freq[evict_key]
        return evict_key

    def remove(self, key: Any) -> None:
        if key in self.key_to_node:
            freq = self.key_to_freq[key]
            self.freq_to_keys[freq].discard(key)
            del self.key_to_node[key]
            del self.key_to_freq[key]

    def size(self) -> int:
        return len(self.key_to_node)

    def get_node(self, key: Any) -> Optional[CacheNode]:
        return self.key_to_node.get(key)
2. Cache Factory
class CacheFactory:
    """Factory that maps strategy names to fully wired caches."""

    @staticmethod
    def create_cache(cache_type: str, capacity: int) -> "ExtensibleCache":
        """Create an ExtensibleCache using the named eviction strategy.

        Args:
            cache_type: case-insensitive strategy name ('lru' or 'lfu').
            capacity: maximum number of entries.

        Raises:
            ValueError: for unknown strategy names.

        BUGFIX: the registry previously listed FIFOStrategy and
        RandomStrategy, which are not defined anywhere in this module,
        so every call raised NameError before the type check could run.
        Register new strategies here once they are actually implemented.
        """
        strategies = {
            'lru': LRUStrategy,
            'lfu': LFUStrategy,
        }
        strategy_cls = strategies.get(cache_type.lower())
        if strategy_cls is None:
            raise ValueError(f"Unsupported cache type: {cache_type}")
        return ExtensibleCache(capacity, strategy_cls())
# Usage example
def demo_usage():
    """Walk through both built-in cache flavours."""
    # LRU: fill to capacity 3, touch 'a', then force one eviction.
    recency_cache = CacheFactory.create_cache('lru', 3)
    for k, v in (('a', 1), ('b', 2), ('c', 3)):
        recency_cache.put(k, v)
    print(recency_cache.get('a'))  # 1, 'a' accessed
    recency_cache.put('d', 4)      # Evict 'b'
    print(recency_cache.get('b'))  # None

    # LFU: 'x' earns an extra access, so 'y' becomes the victim.
    frequency_cache = CacheFactory.create_cache('lfu', 2)
    frequency_cache.put('x', 10)
    frequency_cache.put('y', 20)
    frequency_cache.get('x')       # x frequency increases
    frequency_cache.put('z', 30)   # Evict y (lowest frequency)
Advanced Features
1. TTL Support
class TTLExtension:
    """Time-to-live bolt-on for any eviction strategy.

    Tracks per-key absolute expiry times (wall clock via time.time())
    next to a wrapped strategy.  Expired keys are only reclaimed when
    cleanup_expired() is called — there is no background timer.
    """

    def __init__(self, base_strategy: "EvictionStrategy"):
        # Quoted annotation (BUGFIX): the old eager reference required
        # EvictionStrategy to exist at class-creation time, although
        # only a duck-typed `.remove(key)` is actually needed.
        self.base_strategy = base_strategy
        self.ttl_map = {}  # key -> absolute expiry time (epoch seconds)

    def set_ttl(self, key: Any, ttl_seconds: float) -> None:
        """Expire *key* ttl_seconds from now (fractional seconds allowed)."""
        self.ttl_map[key] = time.time() + ttl_seconds

    def is_expired(self, key: Any) -> bool:
        """Return True if *key* has a TTL that has already elapsed.

        Keys without a TTL never expire.
        """
        expiry = self.ttl_map.get(key)
        return expiry is not None and time.time() > expiry

    def cleanup_expired(self) -> None:
        """Purge every expired key from both the strategy and the TTL map."""
        now = time.time()
        # Snapshot first: ttl_map is mutated while consuming the result.
        expired_keys = [k for k, expiry in self.ttl_map.items() if now > expiry]
        for key in expired_keys:
            self.base_strategy.remove(key)
            del self.ttl_map[key]
2. Cache Statistics
class CacheStatistics:
    """Plain hit/miss/eviction/put counters with a derived hit rate."""

    def __init__(self):
        self.hits = 0       # successful lookups
        self.misses = 0     # failed lookups
        self.evictions = 0  # entries pushed out by capacity pressure
        self.puts = 0       # write operations

    def record_hit(self):
        self.hits += 1

    def record_miss(self):
        self.misses += 1

    def record_eviction(self):
        self.evictions += 1

    def record_put(self):
        self.puts += 1

    @property
    def hit_rate(self) -> float:
        """Fraction of lookups that hit; 0.0 before any lookup happened."""
        lookups = self.hits + self.misses
        return self.hits / lookups if lookups else 0.0
OAVOService Interview Tips
Design Principles Discussion
- Single Responsibility - Strategy classes only handle eviction logic
- Open-Closed Principle - Open for extension, closed for modification
- Dependency Inversion - Depend on abstractions, not concrete implementations
Common Interview Extensions
- Distributed cache consistency
- Memory usage optimization strategies
- Concurrent access performance optimization
- Persistence and recovery mechanisms
Complexity Analysis
- LRU Operations: O(1) get/put/remove
- LFU Operations: O(1) average get/put/remove (hash-map and frequency-bucket set operations; no heap is involved)
- Space Complexity: O(capacity)
OAVOService Professional Interview Support - Deep expertise in system design interviews, providing real-time interview assistance for Google, Amazon, Meta, Citadel, and other top companies. Our expert team masters various design patterns and architectural principles, ensuring you demonstrate professional competence in system design interviews.
Learn about our customized system design interview solutions — from cache architectures to distributed systems, comprehensive enhancement of your design capabilities and interview performance.
Tags: LRU Cache, System Design, Design Patterns, Object-Oriented, Architecture Design, OAVOService