summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-16 17:45:31 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-16 17:45:31 +0000
commit4dedb84f36dbdebac96052766ddffda5a05c74af (patch)
tree3723acb55c7fc230736b5cb36c7cc9728b11ed1b
parentInitial commit. (diff)
downloadgolang-github-jellydator-ttlcache-4dedb84f36dbdebac96052766ddffda5a05c74af.tar.xz
golang-github-jellydator-ttlcache-4dedb84f36dbdebac96052766ddffda5a05c74af.zip
Adding upstream version 3.0.1.upstream/3.0.1upstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
-rw-r--r--.github/workflows/go.yml18
-rw-r--r--CHANGELOG.md94
-rw-r--r--LICENSE21
-rw-r--r--README.md134
-rw-r--r--bench/bench_test.go27
-rw-r--r--cache.go587
-rw-r--r--cache_test.go1087
-rw-r--r--expiration_queue.go85
-rw-r--r--expiration_queue_test.go194
-rw-r--r--go.mod17
-rw-r--r--go.sum54
-rw-r--r--item.go130
-rw-r--r--item_test.go95
-rw-r--r--metrics.go21
-rw-r--r--options.go67
-rw-r--r--options_test.go60
16 files changed, 2691 insertions, 0 deletions
diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml
new file mode 100644
index 0000000..571b298
--- /dev/null
+++ b/.github/workflows/go.yml
@@ -0,0 +1,18 @@
+name: Go
+on: [push, pull_request]
+jobs:
+ test:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Set up Go
+ uses: actions/setup-go@v3
+ with:
+ go-version: ^1.18
+ - name: Checkout code
+ uses: actions/checkout@v3
+ - name: Run tests
+ run: go test -race -shuffle on -timeout 1m -coverprofile=covprofile ./...
+ - name: Send coverage
+ uses: shogo82148/actions-goveralls@v1
+ with:
+ path-to-profile: covprofile
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000..dbfa620
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,94 @@
+# 2.11.0 (December 2021)
+
+#64: @DoubeDi added a method `GetItems` to retrieve all items in the cache. This method also triggers all callbacks associated with a normal `Get`
+
+## API changes:
+
+// GetItems returns a copy of all items in the cache. Returns nil when the cache has been closed.
+func (cache *Cache) GetItems() map[string]interface{} {
+
+# 2.10.0 (December 2021)
+
+#62 : @nikhilk1701 found a memory leak where removed items are not directly eligible for garbage collection. There are no API changes.
+
+# 2.9.0 (October 2021)
+
+#55,#56,#57 : @chenyahui was on fire and greatly improved the performance of the library. He also got rid of the blocking call to expirationNotification, making the code run twice as fast in the benchmarks!
+
+# 2.8.1 (September 2021)
+
+#53 : Avoids recalculation of TTL value returned in API when TTL is extended. by @iczc
+
+# 2.8.0 (August 2021)
+
+#51 : The call GetWithTTL(key string) (interface{}, time.Duration, error) is added so that you can retrieve an item, and also know the remaining TTL. Thanks to @asgarciap for contributing.
+
+# 2.7.0 (June 2021)
+
+#46 : got panic
+
+A panic occurred in a line that checks the maximum amount of items in the cache. While no definite root cause has been found, there is indeed the possibility of crashing an empty cache if the cache limit is set to 'zero' which codes for infinite. This would lead to removal of the first item in the cache which would panic on an empty cache.
+
+Fixed this by applying the global cache lock to all configuration options as well.
+
+# 2.6.0 (May 2021)
+
+#44 : There are no API changes, but a contribution was made to use https://pkg.go.dev/golang.org/x/sync/singleflight as a way to provide everybody waiting for a key with that key when it's fetched.
+
+This removes some complexity from the code and will make sure that all callers will get a return value even if there's high concurrency and low TTL (as proven by the test that was added).
+
+# 2.5.0 (May 2021)
+
+## API changes:
+
+* #39 : Allow custom loader function for each key via `GetByLoader`
+
+Introduce the `SimpleCache` interface for quick-start and basic usage.
+
+# 2.4.0 (April 2021)
+
+## API changes:
+
+* #42 : Add option to get list of keys
+* #40: Allow 'Touch' on items without other operation
+
+// Touch resets the TTL of the key when it exists, returns ErrNotFound if the key is not present.
+func (cache *Cache) Touch(key string) error
+
+// GetKeys returns all keys of items in the cache. Returns nil when the cache has been closed.
+func (cache *Cache) GetKeys() []string
+
+# 2.3.0 (February 2021)
+
+## API changes:
+
+* #38: Added func (cache *Cache) SetExpirationReasonCallback(callback ExpireReasonCallback) This function will replace SetExpirationCallback(..) in the next major version.
+
+# 2.2.0 (January 2021)
+
+## API changes:
+
+* #37 : a GetMetrics call is now available for some information on hits/misses etc.
+* #34 : Errors are now const
+
+# 2.1.0 (October 2020)
+
+## API changes
+
+* `SetCacheSizeLimit(limit int)` a call was contributed to set a cache limit. #35
+
+# 2.0.0 (July 2020)
+
+## Fixes #29, #30, #31
+
+## Behavioural changes
+
+* `Remove(key)` now also calls the expiration callback when it's set
+* `Count()` returns zero when the cache is closed
+
+## API changes
+
+* `SetLoaderFunction` allows you to provide a function to retrieve data on missing cache keys.
+* Operations that affect item behaviour such as `Close`, `Set`, `SetWithTTL`, `Get`, `Remove`, `Purge` now return an error with standard errors `ErrClosed` and `ErrNotFound` instead of a bool or nothing
+* `SkipTTLExtensionOnHit` replaces `SkipTtlExtensionOnHit` to satisfy golint
+* The callback types are now exported
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..f36a3b9
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2022 Jellydator
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..a8af58b
--- /dev/null
+++ b/README.md
@@ -0,0 +1,134 @@
+# TTLCache - an in-memory cache with item expiration
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/jellydator/ttlcache/v3.svg)](https://pkg.go.dev/github.com/jellydator/ttlcache/v3)
+[![Build Status](https://github.com/jellydator/ttlcache/actions/workflows/go.yml/badge.svg)](https://github.com/jellydator/ttlcache/actions/workflows/go.yml)
+[![Coverage Status](https://coveralls.io/repos/github/jellydator/ttlcache/badge.svg?branch=master)](https://coveralls.io/github/jellydator/ttlcache?branch=master)
+[![Go Report Card](https://goreportcard.com/badge/github.com/jellydator/ttlcache/v3)](https://goreportcard.com/report/github.com/jellydator/ttlcache/v3)
+
+## Features
+- Simple API.
+- Type parameters.
+- Item expiration and automatic deletion.
+- Automatic expiration time extension on each `Get` call.
+- `Loader` interface that is used to load/lazily initialize missing cache
+items.
+- Subscription to cache events (insertion and eviction).
+- Metrics.
+- Configurability.
+
+## Installation
+```
+go get github.com/jellydator/ttlcache/v3
+```
+
+## Usage
+The main type of `ttlcache` is `Cache`. It represents a single
+in-memory data store.
+
+To create a new instance of `ttlcache.Cache`, the `ttlcache.New()` function
+should be called:
+```go
+func main() {
+ cache := ttlcache.New[string, string]()
+}
+```
+
+Note that by default, a new cache instance does not let any of its
+items to expire or be automatically deleted. However, this feature
+can be activated by passing a few additional options into the
+`ttlcache.New()` function and calling the `cache.Start()` method:
+```go
+func main() {
+ cache := ttlcache.New[string, string](
+ ttlcache.WithTTL[string, string](30 * time.Minute),
+ )
+
+ go cache.Start() // starts automatic expired item deletion
+}
+```
+
+Even though the `cache.Start()` method handles expired item deletion well,
+there may be times when the system that uses `ttlcache` needs to determine
+when to delete the expired items itself. For example, it may need to
+delete them only when the resource load is at its lowest (e.g., after
+midnight, when the number of users/HTTP requests drops). So, in situations
+like these, instead of calling `cache.Start()`, the system could
+periodically call `cache.DeleteExpired()`:
+```go
+func main() {
+ cache := ttlcache.New[string, string](
+ ttlcache.WithTTL[string, string](30 * time.Minute),
+ )
+
+ for {
+ time.Sleep(4 * time.Hour)
+ cache.DeleteExpired()
+ }
+}
+```
+
+The data stored in `ttlcache.Cache` can be retrieved and updated with
+`Set`, `Get`, `Delete`, etc. methods:
+```go
+func main() {
+ cache := ttlcache.New[string, string](
+ ttlcache.WithTTL[string, string](30 * time.Minute),
+ )
+
+ // insert data
+ cache.Set("first", "value1", ttlcache.DefaultTTL)
+ cache.Set("second", "value2", ttlcache.NoTTL)
+ cache.Set("third", "value3", ttlcache.DefaultTTL)
+
+ // retrieve data
+ item := cache.Get("first")
+ fmt.Println(item.Value(), item.ExpiresAt())
+
+ // delete data
+ cache.Delete("second")
+ cache.DeleteExpired()
+ cache.DeleteAll()
+}
+```
+
+To subscribe to insertion and eviction events, `cache.OnInsertion()` and
+`cache.OnEviction()` methods should be used:
+```go
+func main() {
+ cache := ttlcache.New[string, string](
+ ttlcache.WithTTL[string, string](30 * time.Minute),
+ ttlcache.WithCapacity[string, string](300),
+ )
+
+ cache.OnInsertion(func(ctx context.Context, item *ttlcache.Item[string, string]) {
+ fmt.Println(item.Value(), item.ExpiresAt())
+ })
+ cache.OnEviction(func(ctx context.Context, reason ttlcache.EvictionReason, item *ttlcache.Item[string, string]) {
+ if reason == ttlcache.EvictionReasonCapacityReached {
+ fmt.Println(item.Key(), item.Value())
+ }
+ })
+
+ cache.Set("first", "value1", ttlcache.DefaultTTL)
+ cache.DeleteAll()
+}
+```
+
+To load data when the cache does not have it, a custom or
+existing implementation of `ttlcache.Loader` can be used:
+```go
+func main() {
+ loader := ttlcache.LoaderFunc[string, string](
+ func(c *ttlcache.Cache[string, string], key string) *ttlcache.Item[string, string] {
+ // load from file/make an HTTP request
+ item := c.Set("key from file", "value from file")
+ return item
+ },
+ )
+ cache := ttlcache.New[string, string](
+ ttlcache.WithLoader[string, string](loader),
+ )
+
+ item := cache.Get("key from file")
+}
+```
diff --git a/bench/bench_test.go b/bench/bench_test.go
new file mode 100644
index 0000000..1ea6cb1
--- /dev/null
+++ b/bench/bench_test.go
@@ -0,0 +1,27 @@
+package bench
+
+import (
+ "fmt"
+ "testing"
+ "time"
+
+ ttlcache "github.com/jellydator/ttlcache/v3"
+)
+
+func BenchmarkCacheSetWithoutTTL(b *testing.B) {
+ cache := ttlcache.New[string, string]()
+
+ for n := 0; n < b.N; n++ {
+ cache.Set(fmt.Sprint(n%1000000), "value", ttlcache.NoTTL)
+ }
+}
+
+func BenchmarkCacheSetWithGlobalTTL(b *testing.B) {
+ cache := ttlcache.New[string, string](
+ ttlcache.WithTTL[string, string](50 * time.Millisecond),
+ )
+
+ for n := 0; n < b.N; n++ {
+ cache.Set(fmt.Sprint(n%1000000), "value", ttlcache.DefaultTTL)
+ }
+}
diff --git a/cache.go b/cache.go
new file mode 100644
index 0000000..6971f26
--- /dev/null
+++ b/cache.go
@@ -0,0 +1,587 @@
+package ttlcache
+
+import (
+ "container/list"
+ "context"
+ "fmt"
+ "sync"
+ "time"
+
+ "golang.org/x/sync/singleflight"
+)
+
+// Available eviction reasons.
+const (
+ EvictionReasonDeleted EvictionReason = iota + 1
+ EvictionReasonCapacityReached
+ EvictionReasonExpired
+)
+
+// EvictionReason is used to specify why a certain item was
+// evicted/deleted.
+type EvictionReason int
+
+// Cache is a synchronised map of items that are automatically removed
+// when they expire or the capacity is reached.
+type Cache[K comparable, V any] struct {
+ items struct {
+ mu sync.RWMutex
+ values map[K]*list.Element
+
+ // a generic doubly linked list would be more convenient
+ // (and more performant?). It's possible that this
+ // will be introduced with/in go1.19+
+ lru *list.List
+ expQueue expirationQueue[K, V]
+
+ timerCh chan time.Duration
+ }
+
+ metricsMu sync.RWMutex
+ metrics Metrics
+
+ events struct {
+ insertion struct {
+ mu sync.RWMutex
+ nextID uint64
+ fns map[uint64]func(*Item[K, V])
+ }
+ eviction struct {
+ mu sync.RWMutex
+ nextID uint64
+ fns map[uint64]func(EvictionReason, *Item[K, V])
+ }
+ }
+
+ stopCh chan struct{}
+ options options[K, V]
+}
+
+// New creates a new instance of cache.
+func New[K comparable, V any](opts ...Option[K, V]) *Cache[K, V] {
+ c := &Cache[K, V]{
+ stopCh: make(chan struct{}),
+ }
+ c.items.values = make(map[K]*list.Element)
+ c.items.lru = list.New()
+ c.items.expQueue = newExpirationQueue[K, V]()
+ c.items.timerCh = make(chan time.Duration, 1) // buffer is important
+ c.events.insertion.fns = make(map[uint64]func(*Item[K, V]))
+ c.events.eviction.fns = make(map[uint64]func(EvictionReason, *Item[K, V]))
+
+ applyOptions(&c.options, opts...)
+
+ return c
+}
+
+// updateExpirations updates the expiration queue and notifies
+// the cache auto cleaner if needed.
+// Not concurrently safe.
+func (c *Cache[K, V]) updateExpirations(fresh bool, elem *list.Element) {
+ var oldExpiresAt time.Time
+
+ if !c.items.expQueue.isEmpty() {
+ oldExpiresAt = c.items.expQueue[0].Value.(*Item[K, V]).expiresAt
+ }
+
+ if fresh {
+ c.items.expQueue.push(elem)
+ } else {
+ c.items.expQueue.update(elem)
+ }
+
+ newExpiresAt := c.items.expQueue[0].Value.(*Item[K, V]).expiresAt
+
+ // check if the closest/soonest expiration timestamp changed
+ if newExpiresAt.IsZero() || (!oldExpiresAt.IsZero() && !newExpiresAt.Before(oldExpiresAt)) {
+ return
+ }
+
+ d := time.Until(newExpiresAt)
+
+ // It's possible that the auto cleaner isn't active or
+ // is busy, so we need to drain the channel before
+ // sending a new value.
+ // Also, since this method is called after locking the items' mutex,
+ // we can be sure that there is no other concurrent call of this
+ // method
+ if len(c.items.timerCh) > 0 {
+ // we need to drain this channel in a select with a default
+ // case because it's possible that the auto cleaner
+ // read this channel just after we entered this if
+ select {
+ case d1 := <-c.items.timerCh:
+ if d1 < d {
+ d = d1
+ }
+ default:
+ }
+ }
+
+ // since the channel has a size 1 buffer, we can be sure
+ // that the line below won't block (we can't overfill the buffer
+ // because we just drained it)
+ c.items.timerCh <- d
+}
+
+// set creates a new item, adds it to the cache and then returns it.
+// Not concurrently safe.
+func (c *Cache[K, V]) set(key K, value V, ttl time.Duration) *Item[K, V] {
+ if ttl == DefaultTTL {
+ ttl = c.options.ttl
+ }
+
+ elem := c.get(key, false)
+ if elem != nil {
+ // update/overwrite an existing item
+ item := elem.Value.(*Item[K, V])
+ item.update(value, ttl)
+ c.updateExpirations(false, elem)
+
+ return item
+ }
+
+ if c.options.capacity != 0 && uint64(len(c.items.values)) >= c.options.capacity {
+ // delete the oldest item
+ c.evict(EvictionReasonCapacityReached, c.items.lru.Back())
+ }
+
+ // create a new item
+ item := newItem(key, value, ttl)
+ elem = c.items.lru.PushFront(item)
+ c.items.values[key] = elem
+ c.updateExpirations(true, elem)
+
+ c.metricsMu.Lock()
+ c.metrics.Insertions++
+ c.metricsMu.Unlock()
+
+ c.events.insertion.mu.RLock()
+ for _, fn := range c.events.insertion.fns {
+ fn(item)
+ }
+ c.events.insertion.mu.RUnlock()
+
+ return item
+}
+
+// get retrieves an item from the cache and extends its expiration
+// time if 'touch' is set to true.
+// It returns nil if the item is not found or is expired.
+// Not concurrently safe.
+func (c *Cache[K, V]) get(key K, touch bool) *list.Element {
+ elem := c.items.values[key]
+ if elem == nil {
+ return nil
+ }
+
+ item := elem.Value.(*Item[K, V])
+ if item.isExpiredUnsafe() {
+ return nil
+ }
+
+ c.items.lru.MoveToFront(elem)
+
+ if touch && item.ttl > 0 {
+ item.touch()
+ c.updateExpirations(false, elem)
+ }
+
+ return elem
+}
+
+// evict deletes items from the cache.
+// If no items are provided, all currently present cache items
+// are evicted.
+// Not concurrently safe.
+func (c *Cache[K, V]) evict(reason EvictionReason, elems ...*list.Element) {
+ if len(elems) > 0 {
+ c.metricsMu.Lock()
+ c.metrics.Evictions += uint64(len(elems))
+ c.metricsMu.Unlock()
+
+ c.events.eviction.mu.RLock()
+ for i := range elems {
+ item := elems[i].Value.(*Item[K, V])
+ delete(c.items.values, item.key)
+ c.items.lru.Remove(elems[i])
+ c.items.expQueue.remove(elems[i])
+
+ for _, fn := range c.events.eviction.fns {
+ fn(reason, item)
+ }
+ }
+ c.events.eviction.mu.RUnlock()
+
+ return
+ }
+
+ c.metricsMu.Lock()
+ c.metrics.Evictions += uint64(len(c.items.values))
+ c.metricsMu.Unlock()
+
+ c.events.eviction.mu.RLock()
+ for _, elem := range c.items.values {
+ item := elem.Value.(*Item[K, V])
+
+ for _, fn := range c.events.eviction.fns {
+ fn(reason, item)
+ }
+ }
+ c.events.eviction.mu.RUnlock()
+
+ c.items.values = make(map[K]*list.Element)
+ c.items.lru.Init()
+ c.items.expQueue = newExpirationQueue[K, V]()
+}
+
+// Set creates a new item from the provided key and value, adds
+// it to the cache and then returns it. If an item associated with the
+// provided key already exists, the new item overwrites the existing one.
+func (c *Cache[K, V]) Set(key K, value V, ttl time.Duration) *Item[K, V] {
+ c.items.mu.Lock()
+ defer c.items.mu.Unlock()
+
+ return c.set(key, value, ttl)
+}
+
+// Get retrieves an item from the cache by the provided key.
+// Unless this is disabled, it also extends/touches an item's
+// expiration timestamp on successful retrieval.
+// If the item is not found, a nil value is returned.
+func (c *Cache[K, V]) Get(key K, opts ...Option[K, V]) *Item[K, V] {
+ getOpts := options[K, V]{
+ loader: c.options.loader,
+ disableTouchOnHit: c.options.disableTouchOnHit,
+ }
+
+ applyOptions(&getOpts, opts...)
+
+ c.items.mu.Lock()
+ elem := c.get(key, !getOpts.disableTouchOnHit)
+ c.items.mu.Unlock()
+
+ if elem == nil {
+ c.metricsMu.Lock()
+ c.metrics.Misses++
+ c.metricsMu.Unlock()
+
+ if getOpts.loader != nil {
+ return getOpts.loader.Load(c, key)
+ }
+
+ return nil
+ }
+
+ c.metricsMu.Lock()
+ c.metrics.Hits++
+ c.metricsMu.Unlock()
+
+ return elem.Value.(*Item[K, V])
+}
+
+// Delete deletes an item from the cache. If the item associated with
+// the key is not found, the method is no-op.
+func (c *Cache[K, V]) Delete(key K) {
+ c.items.mu.Lock()
+ defer c.items.mu.Unlock()
+
+ elem := c.items.values[key]
+ if elem == nil {
+ return
+ }
+
+ c.evict(EvictionReasonDeleted, elem)
+}
+
+// DeleteAll deletes all items from the cache.
+func (c *Cache[K, V]) DeleteAll() {
+ c.items.mu.Lock()
+ c.evict(EvictionReasonDeleted)
+ c.items.mu.Unlock()
+}
+
+// DeleteExpired deletes all expired items from the cache.
+func (c *Cache[K, V]) DeleteExpired() {
+ c.items.mu.Lock()
+ defer c.items.mu.Unlock()
+
+ if c.items.expQueue.isEmpty() {
+ return
+ }
+
+ e := c.items.expQueue[0]
+ for e.Value.(*Item[K, V]).isExpiredUnsafe() {
+ c.evict(EvictionReasonExpired, e)
+
+ if c.items.expQueue.isEmpty() {
+ break
+ }
+
+ // expiration queue has a new root
+ e = c.items.expQueue[0]
+ }
+}
+
+// Touch simulates an item's retrieval without actually returning it.
+// Its main purpose is to extend an item's expiration timestamp.
+// If the item is not found, the method is no-op.
+func (c *Cache[K, V]) Touch(key K) {
+ c.items.mu.Lock()
+ c.get(key, true)
+ c.items.mu.Unlock()
+}
+
+// Len returns the number of items in the cache.
+func (c *Cache[K, V]) Len() int {
+ c.items.mu.RLock()
+ defer c.items.mu.RUnlock()
+
+ return len(c.items.values)
+}
+
+// Keys returns all keys currently present in the cache.
+func (c *Cache[K, V]) Keys() []K {
+ c.items.mu.RLock()
+ defer c.items.mu.RUnlock()
+
+ res := make([]K, 0, len(c.items.values))
+ for k := range c.items.values {
+ res = append(res, k)
+ }
+
+ return res
+}
+
+// Items returns a copy of all items in the cache.
+// It does not update any expiration timestamps.
+func (c *Cache[K, V]) Items() map[K]*Item[K, V] {
+ c.items.mu.RLock()
+ defer c.items.mu.RUnlock()
+
+ items := make(map[K]*Item[K, V], len(c.items.values))
+ for k := range c.items.values {
+ item := c.get(k, false)
+ if item != nil {
+ items[k] = item.Value.(*Item[K, V])
+ }
+ }
+
+ return items
+}
+
+// Metrics returns the metrics of the cache.
+func (c *Cache[K, V]) Metrics() Metrics {
+ c.metricsMu.RLock()
+ defer c.metricsMu.RUnlock()
+
+ return c.metrics
+}
+
+// Start starts an automatic cleanup process that
+// periodically deletes expired items.
+// It blocks until Stop is called.
+func (c *Cache[K, V]) Start() {
+ waitDur := func() time.Duration {
+ c.items.mu.RLock()
+ defer c.items.mu.RUnlock()
+
+ if !c.items.expQueue.isEmpty() &&
+ !c.items.expQueue[0].Value.(*Item[K, V]).expiresAt.IsZero() {
+ d := time.Until(c.items.expQueue[0].Value.(*Item[K, V]).expiresAt)
+ if d <= 0 {
+ // execute immediately
+ return time.Microsecond
+ }
+
+ return d
+ }
+
+ if c.options.ttl > 0 {
+ return c.options.ttl
+ }
+
+ return time.Hour
+ }
+
+ timer := time.NewTimer(waitDur())
+ stop := func() {
+ if !timer.Stop() {
+ // drain the timer chan
+ select {
+ case <-timer.C:
+ default:
+ }
+ }
+ }
+
+ defer stop()
+
+ for {
+ select {
+ case <-c.stopCh:
+ return
+ case d := <-c.items.timerCh:
+ stop()
+ timer.Reset(d)
+ case <-timer.C:
+ c.DeleteExpired()
+ stop()
+ timer.Reset(waitDur())
+ }
+ }
+}
+
+// Stop stops the automatic cleanup process.
+// It blocks until the cleanup process exits.
+func (c *Cache[K, V]) Stop() {
+ c.stopCh <- struct{}{}
+}
+
+// OnInsertion adds the provided function to be executed when
+// a new item is inserted into the cache. The function is executed
+// on a separate goroutine and does not block the flow of the cache
+// manager.
+// The returned function may be called to delete the subscription function
+// from the list of insertion subscribers.
+// When the returned function is called, it blocks until all instances of
+// the same subscription function return. A context is used to notify the
+// subscription function when the returned/deletion function is called.
+func (c *Cache[K, V]) OnInsertion(fn func(context.Context, *Item[K, V])) func() {
+ var (
+ wg sync.WaitGroup
+ ctx, cancel = context.WithCancel(context.Background())
+ )
+
+ c.events.insertion.mu.Lock()
+ id := c.events.insertion.nextID
+ c.events.insertion.fns[id] = func(item *Item[K, V]) {
+ wg.Add(1)
+ go func() {
+ fn(ctx, item)
+ wg.Done()
+ }()
+ }
+ c.events.insertion.nextID++
+ c.events.insertion.mu.Unlock()
+
+ return func() {
+ cancel()
+
+ c.events.insertion.mu.Lock()
+ delete(c.events.insertion.fns, id)
+ c.events.insertion.mu.Unlock()
+
+ wg.Wait()
+ }
+}
+
+// OnEviction adds the provided function to be executed when
+// an item is evicted/deleted from the cache. The function is executed
+// on a separate goroutine and does not block the flow of the cache
+// manager.
+// The returned function may be called to delete the subscription function
+// from the list of eviction subscribers.
+// When the returned function is called, it blocks until all instances of
+// the same subscription function return. A context is used to notify the
+// subscription function when the returned/deletion function is called.
+func (c *Cache[K, V]) OnEviction(fn func(context.Context, EvictionReason, *Item[K, V])) func() {
+ var (
+ wg sync.WaitGroup
+ ctx, cancel = context.WithCancel(context.Background())
+ )
+
+ c.events.eviction.mu.Lock()
+ id := c.events.eviction.nextID
+ c.events.eviction.fns[id] = func(r EvictionReason, item *Item[K, V]) {
+ wg.Add(1)
+ go func() {
+ fn(ctx, r, item)
+ wg.Done()
+ }()
+ }
+ c.events.eviction.nextID++
+ c.events.eviction.mu.Unlock()
+
+ return func() {
+ cancel()
+
+ c.events.eviction.mu.Lock()
+ delete(c.events.eviction.fns, id)
+ c.events.eviction.mu.Unlock()
+
+ wg.Wait()
+ }
+}
+
+// Loader is an interface that handles missing data loading.
+type Loader[K comparable, V any] interface {
+ // Load should execute a custom item retrieval logic and
+ // return the item that is associated with the key.
+ // It should return nil if the item is not found/valid.
+ // The method is allowed to fetch data from the cache instance
+ // or update it for future use.
+ Load(c *Cache[K, V], key K) *Item[K, V]
+}
+
+// LoaderFunc type is an adapter that allows the use of ordinary
+// functions as data loaders.
+type LoaderFunc[K comparable, V any] func(*Cache[K, V], K) *Item[K, V]
+
+// Load executes a custom item retrieval logic and returns the item that
+// is associated with the key.
+// It returns nil if the item is not found/valid.
+func (l LoaderFunc[K, V]) Load(c *Cache[K, V], key K) *Item[K, V] {
+ return l(c, key)
+}
+
+// SuppressedLoader wraps another Loader and suppresses duplicate
+// calls to its Load method.
+type SuppressedLoader[K comparable, V any] struct {
+ loader Loader[K, V]
+ group *singleflight.Group
+}
+
+// NewSuppressedLoader creates a new instance of suppressed loader.
+// If the group parameter is nil, a newly created instance of
+// *singleflight.Group is used.
+func NewSuppressedLoader[K comparable, V any](loader Loader[K, V], group *singleflight.Group) *SuppressedLoader[K, V] {
+ if group == nil {
+ group = &singleflight.Group{}
+ }
+
+ return &SuppressedLoader[K, V]{
+ loader: loader,
+ group: group,
+ }
+}
+
+// Load executes a custom item retrieval logic and returns the item that
+// is associated with the key.
+// It returns nil if the item is not found/valid.
+// It also ensures that only one execution of the wrapped Loader's Load
+// method is in-flight for a given key at a time.
+func (l *SuppressedLoader[K, V]) Load(c *Cache[K, V], key K) *Item[K, V] {
+ // there should be a better/generic way to create a
+ // singleflight Group's key. It's possible that a generic
+ // singleflight.Group will be introduced with/in go1.19+
+ strKey := fmt.Sprint(key)
+
+ // the error can be discarded since the singleflight.Group
+ // itself does not return any of its errors, it returns
+ // the error that we return ourselves in the func below, which
+ // is also nil
+ res, _, _ := l.group.Do(strKey, func() (interface{}, error) {
+ item := l.loader.Load(c, key)
+ if item == nil {
+ return nil, nil
+ }
+
+ return item, nil
+ })
+ if res == nil {
+ return nil
+ }
+
+ return res.(*Item[K, V])
+}
diff --git a/cache_test.go b/cache_test.go
new file mode 100644
index 0000000..0d5a265
--- /dev/null
+++ b/cache_test.go
@@ -0,0 +1,1087 @@
+package ttlcache
+
+import (
+ "container/list"
+ "context"
+ "fmt"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/goleak"
+ "golang.org/x/sync/singleflight"
+)
+
// TestMain verifies, via goleak, that no test in this package leaks
// goroutines.
func TestMain(m *testing.M) {
	goleak.VerifyTestMain(m)
}

// Test_New checks that New initializes every internal cache structure
// and applies the provided functional options (TTL and capacity).
func Test_New(t *testing.T) {
	c := New[string, string](
		WithTTL[string, string](time.Hour),
		WithCapacity[string, string](1),
	)
	require.NotNil(t, c)
	assert.NotNil(t, c.stopCh)
	assert.NotNil(t, c.items.values)
	assert.NotNil(t, c.items.lru)
	assert.NotNil(t, c.items.expQueue)
	assert.NotNil(t, c.items.timerCh)
	assert.NotNil(t, c.events.insertion.fns)
	assert.NotNil(t, c.events.eviction.fns)
	assert.Equal(t, time.Hour, c.options.ttl)
	assert.Equal(t, uint64(1), c.options.capacity)
}
+
// Test_Cache_updateExpirations table-tests updateExpirations: for every
// combination of fresh/existing item, old/new expiration timestamps,
// queue emptiness and a pre-filled timer channel, it checks which
// duration (if any) ends up on the expiration timer channel.
func Test_Cache_updateExpirations(t *testing.T) {
	oldExp, newExp := time.Now().Add(time.Hour), time.Now().Add(time.Minute)

	cc := map[string]struct {
		TimerChValue time.Duration
		Fresh        bool
		EmptyQueue   bool
		OldExpiresAt time.Time
		NewExpiresAt time.Time
		Result       time.Duration
	}{
		"Update with fresh item and zero new and non zero old expiresAt fields": {
			Fresh:        true,
			OldExpiresAt: oldExp,
		},
		"Update with non fresh item and zero new and non zero old expiresAt fields": {
			OldExpiresAt: oldExp,
		},
		"Update with fresh item and matching new and old expiresAt fields": {
			Fresh:        true,
			OldExpiresAt: oldExp,
			NewExpiresAt: oldExp,
		},
		"Update with non fresh item and matching new and old expiresAt fields": {
			OldExpiresAt: oldExp,
			NewExpiresAt: oldExp,
		},
		"Update with non zero new expiresAt field and empty queue": {
			Fresh:        true,
			EmptyQueue:   true,
			NewExpiresAt: newExp,
			Result:       time.Until(newExp),
		},
		"Update with fresh item and non zero new and zero old expiresAt fields": {
			Fresh:        true,
			NewExpiresAt: newExp,
			Result:       time.Until(newExp),
		},
		"Update with non fresh item and non zero new and zero old expiresAt fields": {
			NewExpiresAt: newExp,
			Result:       time.Until(newExp),
		},
		"Update with fresh item and non zero new and old expiresAt fields": {
			Fresh:        true,
			OldExpiresAt: oldExp,
			NewExpiresAt: newExp,
			Result:       time.Until(newExp),
		},
		"Update with non fresh item and non zero new and old expiresAt fields": {
			OldExpiresAt: oldExp,
			NewExpiresAt: newExp,
			Result:       time.Until(newExp),
		},
		"Update with full timerCh (lesser value), fresh item and non zero new and old expiresAt fields": {
			TimerChValue: time.Second,
			Fresh:        true,
			OldExpiresAt: oldExp,
			NewExpiresAt: newExp,
			Result:       time.Second,
		},
		"Update with full timerCh (lesser value), non fresh item and non zero new and old expiresAt fields": {
			TimerChValue: time.Second,
			OldExpiresAt: oldExp,
			NewExpiresAt: newExp,
			Result:       time.Second,
		},
		"Update with full timerCh (greater value), fresh item and non zero new and old expiresAt fields": {
			TimerChValue: time.Hour,
			Fresh:        true,
			OldExpiresAt: oldExp,
			NewExpiresAt: newExp,
			Result:       time.Until(newExp),
		},
		"Update with full timerCh (greater value), non fresh item and non zero new and old expiresAt fields": {
			TimerChValue: time.Hour,
			OldExpiresAt: oldExp,
			NewExpiresAt: newExp,
			Result:       time.Until(newExp),
		},
	}

	for cn, c := range cc {
		c := c // capture range variable for the parallel subtest (pre-1.22 semantics)

		t.Run(cn, func(t *testing.T) {
			t.Parallel()

			cache := prepCache(time.Hour)

			if c.TimerChValue > 0 {
				cache.items.timerCh <- c.TimerChValue
			}

			elem := &list.Element{
				Value: &Item[string, string]{
					expiresAt: c.NewExpiresAt,
				},
			}

			if !c.EmptyQueue {
				cache.items.expQueue.push(&list.Element{
					Value: &Item[string, string]{
						expiresAt: c.OldExpiresAt,
					},
				})

				if !c.Fresh {
					// A non-fresh item must already be in the queue with its
					// old timestamp before it is switched to the new one.
					elem = &list.Element{
						Value: &Item[string, string]{
							expiresAt: c.OldExpiresAt,
						},
					}
					cache.items.expQueue.push(elem)

					elem.Value.(*Item[string, string]).expiresAt = c.NewExpiresAt
				}
			}

			cache.updateExpirations(c.Fresh, elem)

			var res time.Duration

			// Non-blocking read: an empty channel means no timer update
			// was requested, leaving res at its zero value.
			select {
			case res = <-cache.items.timerCh:
			default:
			}

			assert.InDelta(t, c.Result, res, float64(time.Second))
		})
	}
}
+
// Test_Cache_set table-tests the unexported set method: updates of an
// existing key, inserts of a new key, capacity-driven eviction of the
// LRU element, metric counting, event-callback invocation and the
// TTL/expiration-queue bookkeeping for custom, No and Default TTLs.
func Test_Cache_set(t *testing.T) {
	const newKey, existingKey, evictedKey = "newKey123", "existingKey", "evicted"

	cc := map[string]struct {
		Capacity  uint64
		Key       string
		TTL       time.Duration
		Metrics   Metrics
		ExpectFns bool
	}{
		"Set with existing key and custom TTL": {
			Key: existingKey,
			TTL: time.Minute,
		},
		"Set with existing key and NoTTL": {
			Key: existingKey,
			TTL: NoTTL,
		},
		"Set with existing key and DefaultTTL": {
			Key: existingKey,
			TTL: DefaultTTL,
		},
		"Set with new key and eviction caused by small capacity": {
			Capacity: 3,
			Key:      newKey,
			TTL:      DefaultTTL,
			Metrics: Metrics{
				Insertions: 1,
				Evictions:  1,
			},
			ExpectFns: true,
		},
		"Set with new key and no eviction caused by large capacity": {
			Capacity: 10,
			Key:      newKey,
			TTL:      DefaultTTL,
			Metrics: Metrics{
				Insertions: 1,
			},
			ExpectFns: true,
		},
		"Set with new key and custom TTL": {
			Key: newKey,
			TTL: time.Minute,
			Metrics: Metrics{
				Insertions: 1,
			},
			ExpectFns: true,
		},
		"Set with new key and NoTTL": {
			Key: newKey,
			TTL: NoTTL,
			Metrics: Metrics{
				Insertions: 1,
			},
			ExpectFns: true,
		},
		"Set with new key and DefaultTTL": {
			Key: newKey,
			TTL: DefaultTTL,
			Metrics: Metrics{
				Insertions: 1,
			},
			ExpectFns: true,
		},
	}

	for cn, c := range cc {
		c := c // capture range variable for the parallel subtest

		t.Run(cn, func(t *testing.T) {
			t.Parallel()

			var (
				insertFnsCalls   int
				evictionFnsCalls int
			)

			cache := prepCache(time.Hour, evictedKey, existingKey, "test3")
			cache.options.capacity = c.Capacity
			cache.options.ttl = time.Minute * 20
			// Two identical subscribers, so each event is expected to be
			// observed exactly twice.
			cache.events.insertion.fns[1] = func(item *Item[string, string]) {
				assert.Equal(t, newKey, item.key)
				insertFnsCalls++
			}
			cache.events.insertion.fns[2] = cache.events.insertion.fns[1]
			cache.events.eviction.fns[1] = func(r EvictionReason, item *Item[string, string]) {
				assert.Equal(t, EvictionReasonCapacityReached, r)
				assert.Equal(t, evictedKey, item.key)
				evictionFnsCalls++
			}
			cache.events.eviction.fns[2] = cache.events.eviction.fns[1]

			// Expected final item count: 3 pre-seeded items, plus one when
			// a new key is inserted without triggering eviction.
			total := 3
			if c.Key == newKey && (c.Capacity == 0 || c.Capacity >= 4) {
				total++
			}

			item := cache.set(c.Key, "value123", c.TTL)

			if c.ExpectFns {
				assert.Equal(t, 2, insertFnsCalls)

				if c.Capacity > 0 && c.Capacity < 4 {
					assert.Equal(t, 2, evictionFnsCalls)
				}
			}

			assert.Same(t, cache.items.values[c.Key].Value.(*Item[string, string]), item)
			assert.Len(t, cache.items.values, total)
			assert.Equal(t, c.Key, item.key)
			assert.Equal(t, "value123", item.value)
			assert.Equal(t, c.Key, cache.items.lru.Front().Value.(*Item[string, string]).key)
			assert.Equal(t, c.Metrics, cache.metrics)

			if c.Capacity > 0 && c.Capacity < 4 {
				assert.NotEqual(t, evictedKey, cache.items.lru.Back().Value.(*Item[string, string]).key)
			}

			switch {
			case c.TTL == DefaultTTL:
				assert.Equal(t, cache.options.ttl, item.ttl)
				assert.WithinDuration(t, time.Now(), item.expiresAt, cache.options.ttl)
				assert.Equal(t, c.Key, cache.items.expQueue[0].Value.(*Item[string, string]).key)
			case c.TTL > DefaultTTL:
				assert.Equal(t, c.TTL, item.ttl)
				assert.WithinDuration(t, time.Now(), item.expiresAt, c.TTL)
				assert.Equal(t, c.Key, cache.items.expQueue[0].Value.(*Item[string, string]).key)
			default:
				// NoTTL: the item never expires and must not sit at the
				// front of the expiration queue.
				assert.Equal(t, c.TTL, item.ttl)
				assert.Zero(t, item.expiresAt)
				assert.NotEqual(t, c.Key, cache.items.expQueue[0].Value.(*Item[string, string]).key)
			}
		})
	}
}
+
// Test_Cache_get table-tests the unexported get method: missing keys,
// expired items, plain reads and touch-on-read behavior with zero and
// non-zero TTLs (touch must extend expiresAt and reposition the item in
// the expiration queue).
func Test_Cache_get(t *testing.T) {
	const existingKey, notFoundKey, expiredKey = "existing", "notfound", "expired"

	cc := map[string]struct {
		Key     string
		Touch   bool
		WithTTL bool
	}{
		"Retrieval of non-existent item": {
			Key: notFoundKey,
		},
		"Retrieval of expired item": {
			Key: expiredKey,
		},
		"Retrieval of existing item without update": {
			Key: existingKey,
		},
		"Retrieval of existing item with touch and non zero TTL": {
			Key:     existingKey,
			Touch:   true,
			WithTTL: true,
		},
		"Retrieval of existing item with touch and zero TTL": {
			Key:   existingKey,
			Touch: true,
		},
	}

	for cn, c := range cc {
		c := c // capture range variable for the parallel subtest

		t.Run(cn, func(t *testing.T) {
			t.Parallel()

			cache := prepCache(time.Hour, existingKey, "test2", "test3")
			addToCache(cache, time.Nanosecond, expiredKey)
			time.Sleep(time.Millisecond) // force expiration

			oldItem := cache.items.values[existingKey].Value.(*Item[string, string])
			oldQueueIndex := oldItem.queueIndex
			oldExpiresAt := oldItem.expiresAt

			if c.WithTTL {
				oldItem.ttl = time.Hour * 30
			} else {
				oldItem.ttl = 0
			}

			elem := cache.get(c.Key, c.Touch)

			if c.Key == notFoundKey {
				assert.Nil(t, elem)
				return
			}

			if c.Key == expiredKey {
				// The expired item stays in the map; get just refuses to
				// return it.
				assert.True(t, time.Now().After(cache.items.values[expiredKey].Value.(*Item[string, string]).expiresAt))
				assert.Nil(t, elem)
				return
			}

			require.NotNil(t, elem)
			item := elem.Value.(*Item[string, string])

			if c.Touch && c.WithTTL {
				assert.True(t, item.expiresAt.After(oldExpiresAt))
				assert.NotEqual(t, oldQueueIndex, item.queueIndex)
			} else {
				assert.True(t, item.expiresAt.Equal(oldExpiresAt))
				assert.Equal(t, oldQueueIndex, item.queueIndex)
			}

			// A successful get always promotes the item to the LRU front.
			assert.Equal(t, c.Key, cache.items.lru.Front().Value.(*Item[string, string]).key)
		})
	}
}
+
// Test_Cache_evict checks the unexported evict method in both modes:
// with explicit elements (only those are removed) and with no elements
// (everything is removed). Two identical subscribers are registered, so
// each eviction must be reported exactly twice.
func Test_Cache_evict(t *testing.T) {
	var (
		key1FnsCalls int
		key2FnsCalls int
		key3FnsCalls int
		key4FnsCalls int
	)

	cache := prepCache(time.Hour, "1", "2", "3", "4")
	cache.events.eviction.fns[1] = func(r EvictionReason, item *Item[string, string]) {
		assert.Equal(t, EvictionReasonDeleted, r)
		switch item.key {
		case "1":
			key1FnsCalls++
		case "2":
			key2FnsCalls++
		case "3":
			key3FnsCalls++
		case "4":
			key4FnsCalls++
		}
	}
	cache.events.eviction.fns[2] = cache.events.eviction.fns[1]

	// delete only specified: "1" and "2" sit at the back of the LRU
	// because prepCache pushes keys to the front in order.
	cache.evict(EvictionReasonDeleted, cache.items.lru.Back(), cache.items.lru.Back().Prev())

	assert.Equal(t, 2, key1FnsCalls)
	assert.Equal(t, 2, key2FnsCalls)
	assert.Zero(t, key3FnsCalls)
	assert.Zero(t, key4FnsCalls)
	assert.Len(t, cache.items.values, 2)
	assert.NotContains(t, cache.items.values, "1")
	assert.NotContains(t, cache.items.values, "2")
	assert.Equal(t, uint64(2), cache.metrics.Evictions)

	// delete all
	key1FnsCalls, key2FnsCalls = 0, 0
	cache.metrics.Evictions = 0

	cache.evict(EvictionReasonDeleted)

	assert.Zero(t, key1FnsCalls)
	assert.Zero(t, key2FnsCalls)
	assert.Equal(t, 2, key3FnsCalls)
	assert.Equal(t, 2, key4FnsCalls)
	assert.Empty(t, cache.items.values)
	assert.NotContains(t, cache.items.values, "3")
	assert.NotContains(t, cache.items.values, "4")
	assert.Equal(t, uint64(2), cache.metrics.Evictions)
}

// Test_Cache_Set checks the exported Set method for both a brand-new
// key and an overwrite of an existing one.
func Test_Cache_Set(t *testing.T) {
	cache := prepCache(time.Hour, "test1", "test2", "test3")
	item := cache.Set("hello", "value123", time.Minute)
	require.NotNil(t, item)
	assert.Same(t, item, cache.items.values["hello"].Value)

	item = cache.Set("test1", "value123", time.Minute)
	require.NotNil(t, item)
	assert.Same(t, item, cache.items.values["test1"].Value)
}
+
// Test_Cache_Get table-tests the exported Get method: misses with and
// without default/per-call loaders (call options must override the
// defaults), hits, hit/miss metrics, and the touch-on-hit vs
// disable-touch-on-hit expiration behavior.
func Test_Cache_Get(t *testing.T) {
	const notFoundKey, foundKey = "notfound", "test1"
	cc := map[string]struct {
		Key            string
		DefaultOptions options[string, string]
		CallOptions    []Option[string, string]
		Metrics        Metrics
		Result         *Item[string, string]
	}{
		"Get without loader when item is not found": {
			Key: notFoundKey,
			Metrics: Metrics{
				Misses: 1,
			},
		},
		"Get with default loader that returns non nil value when item is not found": {
			Key: notFoundKey,
			DefaultOptions: options[string, string]{
				loader: LoaderFunc[string, string](func(_ *Cache[string, string], _ string) *Item[string, string] {
					return &Item[string, string]{key: "test"}
				}),
			},
			Metrics: Metrics{
				Misses: 1,
			},
			Result: &Item[string, string]{key: "test"},
		},
		"Get with default loader that returns nil value when item is not found": {
			Key: notFoundKey,
			DefaultOptions: options[string, string]{
				loader: LoaderFunc[string, string](func(_ *Cache[string, string], _ string) *Item[string, string] {
					return nil
				}),
			},
			Metrics: Metrics{
				Misses: 1,
			},
		},
		"Get with call loader that returns non nil value when item is not found": {
			Key: notFoundKey,
			DefaultOptions: options[string, string]{
				loader: LoaderFunc[string, string](func(_ *Cache[string, string], _ string) *Item[string, string] {
					return &Item[string, string]{key: "test"}
				}),
			},
			CallOptions: []Option[string, string]{
				WithLoader[string, string](LoaderFunc[string, string](func(_ *Cache[string, string], _ string) *Item[string, string] {
					return &Item[string, string]{key: "hello"}
				})),
			},
			Metrics: Metrics{
				Misses: 1,
			},
			Result: &Item[string, string]{key: "hello"},
		},
		"Get with call loader that returns nil value when item is not found": {
			Key: notFoundKey,
			DefaultOptions: options[string, string]{
				loader: LoaderFunc[string, string](func(_ *Cache[string, string], _ string) *Item[string, string] {
					return &Item[string, string]{key: "test"}
				}),
			},
			CallOptions: []Option[string, string]{
				WithLoader[string, string](LoaderFunc[string, string](func(_ *Cache[string, string], _ string) *Item[string, string] {
					return nil
				})),
			},
			Metrics: Metrics{
				Misses: 1,
			},
		},
		"Get when TTL extension is disabled by default and item is found": {
			Key: foundKey,
			DefaultOptions: options[string, string]{
				disableTouchOnHit: true,
			},
			Metrics: Metrics{
				Hits: 1,
			},
		},
		"Get when TTL extension is disabled and item is found": {
			Key: foundKey,
			CallOptions: []Option[string, string]{
				WithDisableTouchOnHit[string, string](),
			},
			Metrics: Metrics{
				Hits: 1,
			},
		},
		"Get when item is found": {
			Key: foundKey,
			Metrics: Metrics{
				Hits: 1,
			},
		},
	}

	for cn, c := range cc {
		c := c // capture range variable for the parallel subtest

		t.Run(cn, func(t *testing.T) {
			t.Parallel()

			cache := prepCache(time.Minute, foundKey, "test2", "test3")
			oldExpiresAt := cache.items.values[foundKey].Value.(*Item[string, string]).expiresAt
			cache.options = c.DefaultOptions

			res := cache.Get(c.Key, c.CallOptions...)

			if c.Key == foundKey {
				// On a hit the expected result is the stored item itself,
				// which must also have been promoted to the LRU front.
				c.Result = cache.items.values[foundKey].Value.(*Item[string, string])
				assert.Equal(t, foundKey, cache.items.lru.Front().Value.(*Item[string, string]).key)
			}

			assert.Equal(t, c.Metrics, cache.metrics)

			if !assert.Equal(t, c.Result, res) || res == nil || res.ttl == 0 {
				return
			}

			// Merge call options into the defaults to determine whether
			// touch-on-hit was effectively disabled for this case.
			applyOptions(&c.DefaultOptions, c.CallOptions...)

			if c.DefaultOptions.disableTouchOnHit {
				assert.Equal(t, oldExpiresAt, res.expiresAt)
				return
			}

			assert.True(t, oldExpiresAt.Before(res.expiresAt))
			assert.WithinDuration(t, time.Now(), res.expiresAt, res.ttl)
		})
	}
}
+
// Test_Cache_Delete checks that Delete is a no-op for a missing key and
// removes an existing key while notifying both registered eviction
// subscribers.
func Test_Cache_Delete(t *testing.T) {
	var fnsCalls int

	cache := prepCache(time.Hour, "1", "2", "3", "4")
	cache.events.eviction.fns[1] = func(r EvictionReason, item *Item[string, string]) {
		assert.Equal(t, EvictionReasonDeleted, r)
		fnsCalls++
	}
	cache.events.eviction.fns[2] = cache.events.eviction.fns[1]

	// not found
	cache.Delete("1234")
	assert.Zero(t, fnsCalls)
	assert.Len(t, cache.items.values, 4)

	// success
	cache.Delete("1")
	assert.Equal(t, 2, fnsCalls)
	assert.Len(t, cache.items.values, 3)
	assert.NotContains(t, cache.items.values, "1")
}

// Test_Cache_DeleteAll checks that DeleteAll empties the cache and
// reports every removed item to both eviction subscribers.
func Test_Cache_DeleteAll(t *testing.T) {
	var (
		key1FnsCalls int
		key2FnsCalls int
		key3FnsCalls int
		key4FnsCalls int
	)

	cache := prepCache(time.Hour, "1", "2", "3", "4")
	cache.events.eviction.fns[1] = func(r EvictionReason, item *Item[string, string]) {
		assert.Equal(t, EvictionReasonDeleted, r)
		switch item.key {
		case "1":
			key1FnsCalls++
		case "2":
			key2FnsCalls++
		case "3":
			key3FnsCalls++
		case "4":
			key4FnsCalls++
		}
	}
	cache.events.eviction.fns[2] = cache.events.eviction.fns[1]

	cache.DeleteAll()
	assert.Empty(t, cache.items.values)
	assert.Equal(t, 2, key1FnsCalls)
	assert.Equal(t, 2, key2FnsCalls)
	assert.Equal(t, 2, key3FnsCalls)
	assert.Equal(t, 2, key4FnsCalls)
}

// Test_Cache_DeleteExpired checks that DeleteExpired removes only
// already-expired items, is safe on an empty cache, and notifies both
// eviction subscribers with EvictionReasonExpired.
func Test_Cache_DeleteExpired(t *testing.T) {
	var (
		key1FnsCalls int
		key2FnsCalls int
	)

	cache := prepCache(time.Hour)
	cache.events.eviction.fns[1] = func(r EvictionReason, item *Item[string, string]) {
		assert.Equal(t, EvictionReasonExpired, r)
		switch item.key {
		case "5":
			key1FnsCalls++
		case "6":
			key2FnsCalls++
		}
	}
	cache.events.eviction.fns[2] = cache.events.eviction.fns[1]

	// one item
	addToCache(cache, time.Nanosecond, "5")

	cache.DeleteExpired()
	assert.Empty(t, cache.items.values)
	assert.NotContains(t, cache.items.values, "5")
	assert.Equal(t, 2, key1FnsCalls)

	key1FnsCalls = 0

	// empty
	cache.DeleteExpired()
	assert.Empty(t, cache.items.values)

	// non empty
	addToCache(cache, time.Hour, "1", "2", "3", "4")
	addToCache(cache, time.Nanosecond, "5")
	addToCache(cache, time.Nanosecond, "6") // separate calls: addToCache adds i*time.Minute to each key's TTL within one call
	time.Sleep(time.Millisecond)            // force expiration

	cache.DeleteExpired()
	assert.Len(t, cache.items.values, 4)
	assert.NotContains(t, cache.items.values, "5")
	assert.NotContains(t, cache.items.values, "6")
	assert.Equal(t, 2, key1FnsCalls)
	assert.Equal(t, 2, key2FnsCalls)
}
+
// Test_Cache_Touch checks that Touch extends an item's expiration and
// promotes it to the front of the LRU list.
func Test_Cache_Touch(t *testing.T) {
	cache := prepCache(time.Hour, "1", "2")
	oldExpiresAt := cache.items.values["1"].Value.(*Item[string, string]).expiresAt

	cache.Touch("1")

	newExpiresAt := cache.items.values["1"].Value.(*Item[string, string]).expiresAt
	assert.True(t, newExpiresAt.After(oldExpiresAt))
	assert.Equal(t, "1", cache.items.lru.Front().Value.(*Item[string, string]).key)
}

// Test_Cache_Len checks that Len reports the number of stored items.
func Test_Cache_Len(t *testing.T) {
	cache := prepCache(time.Hour, "1", "2")
	assert.Equal(t, 2, cache.Len())
}

// Test_Cache_Keys checks that Keys returns all stored keys in any order.
func Test_Cache_Keys(t *testing.T) {
	cache := prepCache(time.Hour, "1", "2", "3")
	assert.ElementsMatch(t, []string{"1", "2", "3"}, cache.Keys())
}

// Test_Cache_Items checks that Items returns a key-to-item map covering
// every stored entry.
func Test_Cache_Items(t *testing.T) {
	cache := prepCache(time.Hour, "1", "2", "3")
	items := cache.Items()
	require.Len(t, items, 3)

	require.Contains(t, items, "1")
	assert.Equal(t, "1", items["1"].key)
	require.Contains(t, items, "2")
	assert.Equal(t, "2", items["2"].key)
	require.Contains(t, items, "3")
	assert.Equal(t, "3", items["3"].key)
}

// Test_Cache_Metrics checks that Metrics returns a snapshot of the
// internal metrics struct.
func Test_Cache_Metrics(t *testing.T) {
	cache := Cache[string, string]{
		metrics: Metrics{Evictions: 10},
	}

	assert.Equal(t, Metrics{Evictions: 10}, cache.Metrics())
}
+
// Test_Cache_Start drives the automatic-expiration loop end to end: an
// eviction callback feeds new items and timer values back into the
// running cache and finally closes stopCh, which is what lets the
// blocking Start call (and the test) return.
func Test_Cache_Start(t *testing.T) {
	cache := prepCache(0)
	cache.stopCh = make(chan struct{})

	addToCache(cache, time.Nanosecond, "1")
	time.Sleep(time.Millisecond) // force expiration

	fn := func(r EvictionReason, _ *Item[string, string]) {
		// Run in a goroutine so the eviction path inside Start is never
		// blocked while this callback mutates the cache.
		go func() {
			assert.Equal(t, EvictionReasonExpired, r)

			cache.metricsMu.RLock()
			v := cache.metrics.Evictions
			cache.metricsMu.RUnlock()

			switch v {
			case 1:
				cache.items.mu.Lock()
				addToCache(cache, time.Nanosecond, "2")
				cache.items.mu.Unlock()
				cache.options.ttl = time.Hour
				cache.items.timerCh <- time.Millisecond
			case 2:
				cache.items.mu.Lock()
				addToCache(cache, time.Second, "3")
				addToCache(cache, NoTTL, "4")
				cache.items.mu.Unlock()
				cache.items.timerCh <- time.Millisecond
			default:
				// Third eviction: stop the loop so Start returns.
				close(cache.stopCh)
			}
		}()
	}
	cache.events.eviction.fns[1] = fn

	cache.Start()
}

// Test_Cache_Stop checks that Stop signals the expiration loop through
// the buffered stop channel.
func Test_Cache_Stop(t *testing.T) {
	cache := Cache[string, string]{
		stopCh: make(chan struct{}, 1),
	}
	cache.Stop()
	assert.Len(t, cache.stopCh, 1)
}
+
// Test_Cache_OnInsertion registers two insertion subscribers and checks
// the delete-subscription semantics: while a callback is still
// in-flight (blocked sending on checkCh), the returned delete function
// must not return (the Never assertion); once the callback is drained
// it must complete and remove the subscription.
// NOTE(review): this relies on OnInsertion's delete function waiting
// for in-flight callbacks — presumably via an internal WaitGroup;
// confirm against cache.go.
func Test_Cache_OnInsertion(t *testing.T) {
	checkCh := make(chan struct{})
	resCh := make(chan struct{})
	cache := prepCache(time.Hour)
	del1 := cache.OnInsertion(func(_ context.Context, _ *Item[string, string]) {
		checkCh <- struct{}{}
	})
	del2 := cache.OnInsertion(func(_ context.Context, _ *Item[string, string]) {
		checkCh <- struct{}{}
	})

	require.Len(t, cache.events.insertion.fns, 2)
	assert.Equal(t, uint64(2), cache.events.insertion.nextID)

	// Fire subscription 0; its callback now blocks on checkCh.
	cache.events.insertion.fns[0](nil)

	go func() {
		del1()
		resCh <- struct{}{}
	}()
	// del1 must not finish while the callback is still blocked.
	assert.Never(t, func() bool {
		select {
		case <-resCh:
			return true
		default:
			return false
		}
	}, time.Millisecond*200, time.Millisecond*100)
	// Drain the callback, after which del1 is expected to complete.
	assert.Eventually(t, func() bool {
		select {
		case <-checkCh:
			return true
		default:
			return false
		}
	}, time.Millisecond*500, time.Millisecond*250)
	assert.Eventually(t, func() bool {
		select {
		case <-resCh:
			return true
		default:
			return false
		}
	}, time.Millisecond*500, time.Millisecond*250)

	require.Len(t, cache.events.insertion.fns, 1)
	assert.NotContains(t, cache.events.insertion.fns, uint64(0))
	assert.Contains(t, cache.events.insertion.fns, uint64(1))

	// Repeat the same dance for the second subscription.
	cache.events.insertion.fns[1](nil)

	go func() {
		del2()
		resCh <- struct{}{}
	}()
	assert.Never(t, func() bool {
		select {
		case <-resCh:
			return true
		default:
			return false
		}
	}, time.Millisecond*200, time.Millisecond*100)
	assert.Eventually(t, func() bool {
		select {
		case <-checkCh:
			return true
		default:
			return false
		}
	}, time.Millisecond*500, time.Millisecond*250)
	assert.Eventually(t, func() bool {
		select {
		case <-resCh:
			return true
		default:
			return false
		}
	}, time.Millisecond*500, time.Millisecond*250)

	assert.Empty(t, cache.events.insertion.fns)
	assert.NotContains(t, cache.events.insertion.fns, uint64(1))
}

// Test_Cache_OnEviction mirrors Test_Cache_OnInsertion for eviction
// subscriptions: deleting a subscription must block until its in-flight
// callback drains, then remove it from the registry.
func Test_Cache_OnEviction(t *testing.T) {
	checkCh := make(chan struct{})
	resCh := make(chan struct{})
	cache := prepCache(time.Hour)
	del1 := cache.OnEviction(func(_ context.Context, _ EvictionReason, _ *Item[string, string]) {
		checkCh <- struct{}{}
	})
	del2 := cache.OnEviction(func(_ context.Context, _ EvictionReason, _ *Item[string, string]) {
		checkCh <- struct{}{}
	})

	require.Len(t, cache.events.eviction.fns, 2)
	assert.Equal(t, uint64(2), cache.events.eviction.nextID)

	// Fire subscription 0; its callback now blocks on checkCh.
	cache.events.eviction.fns[0](0, nil)

	go func() {
		del1()
		resCh <- struct{}{}
	}()
	assert.Never(t, func() bool {
		select {
		case <-resCh:
			return true
		default:
			return false
		}
	}, time.Millisecond*200, time.Millisecond*100)
	assert.Eventually(t, func() bool {
		select {
		case <-checkCh:
			return true
		default:
			return false
		}
	}, time.Millisecond*500, time.Millisecond*250)
	assert.Eventually(t, func() bool {
		select {
		case <-resCh:
			return true
		default:
			return false
		}
	}, time.Millisecond*500, time.Millisecond*250)

	require.Len(t, cache.events.eviction.fns, 1)
	assert.NotContains(t, cache.events.eviction.fns, uint64(0))
	assert.Contains(t, cache.events.eviction.fns, uint64(1))

	// Repeat the same dance for the second subscription.
	cache.events.eviction.fns[1](0, nil)

	go func() {
		del2()
		resCh <- struct{}{}
	}()
	assert.Never(t, func() bool {
		select {
		case <-resCh:
			return true
		default:
			return false
		}
	}, time.Millisecond*200, time.Millisecond*100)
	assert.Eventually(t, func() bool {
		select {
		case <-checkCh:
			return true
		default:
			return false
		}
	}, time.Millisecond*500, time.Millisecond*250)
	assert.Eventually(t, func() bool {
		select {
		case <-resCh:
			return true
		default:
			return false
		}
	}, time.Millisecond*500, time.Millisecond*250)

	assert.Empty(t, cache.events.eviction.fns)
	assert.NotContains(t, cache.events.eviction.fns, uint64(1))
}
+
// Test_LoaderFunc_Load checks that a LoaderFunc simply invokes the
// underlying function (a nil cache is passed straight through).
func Test_LoaderFunc_Load(t *testing.T) {
	var called bool

	fn := LoaderFunc[string, string](func(_ *Cache[string, string], _ string) *Item[string, string] {
		called = true
		return nil
	})

	assert.Nil(t, fn(nil, ""))
	assert.True(t, called)
}
+
+func Test_NewSuppressedLoader(t *testing.T) {
+ var called bool
+
+ loader := LoaderFunc[string, string](func(_ *Cache[string, string], _ string) *Item[string, string] {
+ called = true
+ return nil
+ })
+
+ // uses the provided loader and group parameters
+ group := &singleflight.Group{}
+
+ sl := NewSuppressedLoader[string, string](loader, group)
+ require.NotNil(t, sl)
+ require.NotNil(t, sl.loader)
+
+ sl.loader.Load(nil, "")
+
+ assert.True(t, called)
+ assert.Equal(t, group, sl.group)
+
+ // uses the provided loader and automatically creates a new instance
+ // of *singleflight.Group as nil parameter is passed
+ called = false
+
+ sl = NewSuppressedLoader[string, string](loader, nil)
+ require.NotNil(t, sl)
+ require.NotNil(t, sl.loader)
+
+ sl.loader.Load(nil, "")
+
+ assert.True(t, called)
+ assert.NotNil(t, group, sl.group)
+}
+
// Test_SuppressedLoader_Load checks call deduplication: two concurrent
// Load calls for the same key must result in exactly one invocation of
// the wrapped loader (parked on releaseCh until both callers are
// queued), and both callers must observe the same result — nil in the
// first round, a shared *Item in the second.
func Test_SuppressedLoader_Load(t *testing.T) {
	var (
		mu        sync.Mutex
		loadCalls int
		releaseCh = make(chan struct{})
		res       *Item[string, string]
	)

	l := SuppressedLoader[string, string]{
		loader: LoaderFunc[string, string](func(_ *Cache[string, string], _ string) *Item[string, string] {
			mu.Lock()
			loadCalls++
			mu.Unlock()

			// Block until the test releases us, guaranteeing both Load
			// callers pile up behind the same singleflight call.
			<-releaseCh

			if res == nil {
				return nil
			}

			res1 := *res

			return &res1
		}),
		group: &singleflight.Group{},
	}

	var (
		wg           sync.WaitGroup
		item1, item2 *Item[string, string]
	)

	cache := prepCache(time.Hour)

	// nil result
	wg.Add(2)

	go func() {
		item1 = l.Load(cache, "test")
		wg.Done()
	}()

	go func() {
		item2 = l.Load(cache, "test")
		wg.Done()
	}()

	time.Sleep(time.Millisecond * 100) // wait for goroutines to halt
	releaseCh <- struct{}{}

	wg.Wait()
	require.Nil(t, item1)
	require.Nil(t, item2)
	assert.Equal(t, 1, loadCalls)

	// non nil result
	res = &Item[string, string]{key: "test"}
	loadCalls = 0
	wg.Add(2)

	go func() {
		item1 = l.Load(cache, "test")
		wg.Done()
	}()

	go func() {
		item2 = l.Load(cache, "test")
		wg.Done()
	}()

	time.Sleep(time.Millisecond * 100) // wait for goroutines to halt
	releaseCh <- struct{}{}

	wg.Wait()
	// Both callers share the singleflight result, hence Same (pointer
	// identity), not merely Equal.
	require.Same(t, item1, item2)
	assert.Equal(t, "test", item1.key)
	assert.Equal(t, 1, loadCalls)
}
+
// prepCache builds a bare Cache with all internal structures
// initialized (bypassing New, so no background goroutines or default
// options), seeded with the given keys at the given TTL.
func prepCache(ttl time.Duration, keys ...string) *Cache[string, string] {
	c := &Cache[string, string]{}
	c.options.ttl = ttl
	c.items.values = make(map[string]*list.Element)
	c.items.lru = list.New()
	c.items.expQueue = newExpirationQueue[string, string]()
	c.items.timerCh = make(chan time.Duration, 1)
	c.events.eviction.fns = make(map[uint64]func(EvictionReason, *Item[string, string]))
	c.events.insertion.fns = make(map[uint64]func(*Item[string, string]))

	addToCache(c, ttl, keys...)

	return c
}
+
+func addToCache(c *Cache[string, string], ttl time.Duration, keys ...string) {
+ for i, key := range keys {
+ item := newItem(
+ key,
+ fmt.Sprint("value of", key),
+ ttl+time.Duration(i)*time.Minute,
+ )
+ elem := c.items.lru.PushFront(item)
+ c.items.values[key] = elem
+ c.items.expQueue.push(elem)
+ }
+}
diff --git a/expiration_queue.go b/expiration_queue.go
new file mode 100644
index 0000000..0f10458
--- /dev/null
+++ b/expiration_queue.go
@@ -0,0 +1,85 @@
+package ttlcache
+
+import (
+ "container/heap"
+ "container/list"
+)
+
+// expirationQueue stores items that are ordered by their expiration
+// timestamps. The 0th item is closest to its expiration.
+type expirationQueue[K comparable, V any] []*list.Element
+
+// newExpirationQueue creates and initializes a new expiration queue.
+func newExpirationQueue[K comparable, V any]() expirationQueue[K, V] {
+ q := make(expirationQueue[K, V], 0)
+ heap.Init(&q)
+ return q
+}
+
+// isEmpty checks if the queue is empty.
+func (q expirationQueue[K, V]) isEmpty() bool {
+ return q.Len() == 0
+}
+
// update updates an existing item's value and position in the queue.
func (q *expirationQueue[K, V]) update(elem *list.Element) {
	// Each item caches its own heap index, so it can be re-sifted in place.
	heap.Fix(q, elem.Value.(*Item[K, V]).queueIndex)
}
+
// push pushes a new item into the queue and updates the order of its
// elements.
func (q *expirationQueue[K, V]) push(elem *list.Element) {
	heap.Push(q, elem)
}
+
// remove removes an item from the queue and updates the order of its
// elements.
func (q *expirationQueue[K, V]) remove(elem *list.Element) {
	// The item's cached heap index locates it without a linear scan.
	heap.Remove(q, elem.Value.(*Item[K, V]).queueIndex)
}
+
// Len returns the total number of items in the queue.
// It is part of heap.Interface.
func (q expirationQueue[K, V]) Len() int {
	return len(q)
}
+
+// Less checks if the item at the i position expires sooner than
+// the one at the j position.
+func (q expirationQueue[K, V]) Less(i, j int) bool {
+ item1, item2 := q[i].Value.(*Item[K, V]), q[j].Value.(*Item[K, V])
+ if item1.expiresAt.IsZero() {
+ return false
+ }
+
+ if item2.expiresAt.IsZero() {
+ return true
+ }
+
+ return item1.expiresAt.Before(item2.expiresAt)
+}
+
+// Swap switches the places of two queue items.
+func (q expirationQueue[K, V]) Swap(i, j int) {
+ q[i], q[j] = q[j], q[i]
+ q[i].Value.(*Item[K, V]).queueIndex = i
+ q[j].Value.(*Item[K, V]).queueIndex = j
+}
+
// Push appends a new item to the item slice.
// It is part of heap.Interface; callers should use push instead.
func (q *expirationQueue[K, V]) Push(x interface{}) {
	elem := x.(*list.Element)
	// Record the item's heap index so update/remove can find it later.
	elem.Value.(*Item[K, V]).queueIndex = len(*q)
	*q = append(*q, elem)
}
+
+// Pop removes and returns the last item.
+func (q *expirationQueue[K, V]) Pop() interface{} {
+ old := *q
+ i := len(old) - 1
+ elem := old[i]
+ elem.Value.(*Item[K, V]).queueIndex = -1
+ old[i] = nil // avoid memory leak
+ *q = old[:i]
+
+ return elem
+}
diff --git a/expiration_queue_test.go b/expiration_queue_test.go
new file mode 100644
index 0000000..e6584c4
--- /dev/null
+++ b/expiration_queue_test.go
@@ -0,0 +1,194 @@
+package ttlcache
+
+import (
+ "container/list"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
// Test_newExpirationQueue checks that the constructor returns a
// non-nil, ready-to-use queue.
func Test_newExpirationQueue(t *testing.T) {
	assert.NotNil(t, newExpirationQueue[string, string]())
}

// Test_expirationQueue_isEmpty checks isEmpty for both an empty and a
// single-element queue.
func Test_expirationQueue_isEmpty(t *testing.T) {
	assert.True(t, (expirationQueue[string, string]{}).isEmpty())
	assert.False(t, (expirationQueue[string, string]{{}}).isEmpty())
}
+
// Test_expirationQueue_update checks that update re-sifts an item whose
// expiration changed: the sooner-expiring item must end up at index 0.
func Test_expirationQueue_update(t *testing.T) {
	q := expirationQueue[string, string]{
		{
			Value: &Item[string, string]{
				value:      "test1",
				queueIndex: 0,
				expiresAt:  time.Now().Add(time.Hour),
			},
		},
		{
			Value: &Item[string, string]{
				value:      "test2",
				queueIndex: 1,
				expiresAt:  time.Now().Add(time.Minute),
			},
		},
	}

	q.update(q[1])
	require.Len(t, q, 2)
	assert.Equal(t, "test2", q[0].Value.(*Item[string, string]).value)
}

// Test_expirationQueue_push checks that push inserts a new element and
// restores heap order (sooner expiration first).
func Test_expirationQueue_push(t *testing.T) {
	q := expirationQueue[string, string]{
		{
			Value: &Item[string, string]{
				value:      "test1",
				queueIndex: 0,
				expiresAt:  time.Now().Add(time.Hour),
			},
		},
	}
	elem := &list.Element{
		Value: &Item[string, string]{
			value:      "test2",
			queueIndex: 1,
			expiresAt:  time.Now().Add(time.Minute),
		},
	}

	q.push(elem)
	require.Len(t, q, 2)
	assert.Equal(t, "test2", q[0].Value.(*Item[string, string]).value)
}

// Test_expirationQueue_remove checks that remove deletes the targeted
// element while leaving the rest of the queue intact.
func Test_expirationQueue_remove(t *testing.T) {
	q := expirationQueue[string, string]{
		{
			Value: &Item[string, string]{
				value:      "test1",
				queueIndex: 0,
				expiresAt:  time.Now().Add(time.Hour),
			},
		},
		{
			Value: &Item[string, string]{
				value:      "test2",
				queueIndex: 1,
				expiresAt:  time.Now().Add(time.Minute),
			},
		},
	}

	q.remove(q[1])
	require.Len(t, q, 1)
	assert.Equal(t, "test1", q[0].Value.(*Item[string, string]).value)
}

// Test_expirationQueue_Len checks the heap.Interface Len method.
func Test_expirationQueue_Len(t *testing.T) {
	assert.Equal(t, 1, (expirationQueue[string, string]{{}}).Len())
}
+
// Test_expirationQueue_Less checks the ordering rules: sooner
// expiration sorts first, and items with a zero (never-expiring)
// timestamp sort after everything else.
func Test_expirationQueue_Less(t *testing.T) {
	q := expirationQueue[string, string]{
		{
			Value: &Item[string, string]{
				value:      "test1",
				queueIndex: 0,
				expiresAt:  time.Now().Add(time.Hour),
			},
		},
		{
			Value: &Item[string, string]{
				value:      "test2",
				queueIndex: 1,
				expiresAt:  time.Now().Add(time.Minute),
			},
		},
		{
			// No expiresAt: a never-expiring item.
			Value: &Item[string, string]{
				value:      "test3",
				queueIndex: 2,
			},
		},
	}

	assert.False(t, q.Less(2, 1))
	assert.True(t, q.Less(1, 2))
	assert.True(t, q.Less(1, 0))
	assert.False(t, q.Less(0, 1))
}

// Test_expirationQueue_Swap checks that Swap exchanges two elements
// (their cached queue indices are updated by the method itself).
func Test_expirationQueue_Swap(t *testing.T) {
	q := expirationQueue[string, string]{
		{
			Value: &Item[string, string]{
				value:      "test1",
				queueIndex: 0,
				expiresAt:  time.Now().Add(time.Hour),
			},
		},
		{
			Value: &Item[string, string]{
				value:      "test2",
				queueIndex: 1,
				expiresAt:  time.Now().Add(time.Minute),
			},
		},
	}

	q.Swap(0, 1)
	assert.Equal(t, "test2", q[0].Value.(*Item[string, string]).value)
	assert.Equal(t, "test1", q[1].Value.(*Item[string, string]).value)
}

// Test_expirationQueue_Push checks the raw heap.Interface Push method:
// it appends without re-ordering (ordering is heap.Push's job).
func Test_expirationQueue_Push(t *testing.T) {
	q := expirationQueue[string, string]{
		{
			Value: &Item[string, string]{
				value:      "test1",
				queueIndex: 0,
				expiresAt:  time.Now().Add(time.Hour),
			},
		},
	}

	elem := &list.Element{
		Value: &Item[string, string]{
			value:      "test2",
			queueIndex: 1,
			expiresAt:  time.Now().Add(time.Minute),
		},
	}

	q.Push(elem)
	require.Len(t, q, 2)
	assert.Equal(t, "test2", q[1].Value.(*Item[string, string]).value)
}

// Test_expirationQueue_Pop checks the raw heap.Interface Pop method: it
// removes and returns the last element, leaving the rest untouched.
func Test_expirationQueue_Pop(t *testing.T) {
	q := expirationQueue[string, string]{
		{
			Value: &Item[string, string]{
				value:      "test1",
				queueIndex: 0,
				expiresAt:  time.Now().Add(time.Hour),
			},
		},
		{
			Value: &Item[string, string]{
				value:      "test2",
				queueIndex: 1,
				expiresAt:  time.Now().Add(time.Minute),
			},
		},
	}

	v := q.Pop()
	require.NotNil(t, v)
	assert.Equal(t, "test2", v.(*list.Element).Value.(*Item[string, string]).value)
	require.Len(t, q, 1)
	assert.Equal(t, "test1", q[0].Value.(*Item[string, string]).value)
}
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..b1cbffb
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,17 @@
+module github.com/jellydator/ttlcache/v3
+
+go 1.18
+
+require (
+ github.com/stretchr/testify v1.7.0
+ go.uber.org/goleak v1.1.10
+ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
+)
+
+require (
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 // indirect
+ golang.org/x/tools v0.0.0-20210112230658-8b4aab62c064 // indirect
+ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect
+)
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..3e87340
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,54 @@
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0=
+go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 h1:2M3HP5CCK1Si9FQhwnzYhXdG6DXeebvUHFpre8QvbyI=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20210112230658-8b4aab62c064 h1:BmCFkEH4nJrYcAc2L08yX5RhYGD4j58PTMkEUDkpz2I=
+golang.org/x/tools v0.0.0-20210112230658-8b4aab62c064/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/item.go b/item.go
new file mode 100644
index 0000000..58c9c5c
--- /dev/null
+++ b/item.go
@@ -0,0 +1,130 @@
+package ttlcache
+
+import (
+ "sync"
+ "time"
+)
+
+const (
+ // NoTTL indicates that an item should never expire.
+ NoTTL time.Duration = -1
+
+ // DefaultTTL indicates that the default TTL
+ // value should be used.
+ DefaultTTL time.Duration = 0
+)
+
+// Item holds all the information that is associated with a single
+// cache value.
+type Item[K comparable, V any] struct {
+ // the mutex needs to be locked only when:
+ // - data fields are being read inside accessor methods
+ // - data fields are being updated
+ // when data fields are being read in one of the cache's
+ // methods, we can be sure that these fields are not modified in
+ // parallel since the item list is locked by its own mutex as
+ // well, so locking this mutex would be redundant.
+ // In other words, this mutex is only useful when these fields
+ // are being read from the outside (e.g. in event functions).
+ mu sync.RWMutex
+ key K
+ value V
+ ttl time.Duration
+ expiresAt time.Time
+ queueIndex int
+}
+
+// newItem creates a new cache item.
+func newItem[K comparable, V any](key K, value V, ttl time.Duration) *Item[K, V] {
+ item := &Item[K, V]{
+ key: key,
+ value: value,
+ ttl: ttl,
+ }
+ item.touch()
+
+ return item
+}
+
+// update modifies the item's value and TTL.
+func (item *Item[K, V]) update(value V, ttl time.Duration) {
+ item.mu.Lock()
+ defer item.mu.Unlock()
+
+ item.value = value
+ item.ttl = ttl
+
+ // reset expiration timestamp because the new TTL may be
+ // 0 or below
+ item.expiresAt = time.Time{}
+ item.touchUnsafe()
+}
+
+// touch updates the item's expiration timestamp.
+func (item *Item[K, V]) touch() {
+ item.mu.Lock()
+ defer item.mu.Unlock()
+
+ item.touchUnsafe()
+}
+
+// touchUnsafe updates the item's expiration timestamp without
+// locking the mutex.
+func (item *Item[K, V]) touchUnsafe() {
+ if item.ttl <= 0 {
+ return
+ }
+
+ item.expiresAt = time.Now().Add(item.ttl)
+}
+
+// IsExpired returns a bool value that indicates whether the item
+// is expired.
+func (item *Item[K, V]) IsExpired() bool {
+ item.mu.RLock()
+ defer item.mu.RUnlock()
+
+ return item.isExpiredUnsafe()
+}
+
+// isExpiredUnsafe returns a bool value that indicates whether the
+// item is expired without locking the mutex.
+func (item *Item[K, V]) isExpiredUnsafe() bool {
+ if item.ttl <= 0 {
+ return false
+ }
+
+ return item.expiresAt.Before(time.Now())
+}
+
+// Key returns the key of the item.
+func (item *Item[K, V]) Key() K {
+ item.mu.RLock()
+ defer item.mu.RUnlock()
+
+ return item.key
+}
+
+// Value returns the value of the item.
+func (item *Item[K, V]) Value() V {
+ item.mu.RLock()
+ defer item.mu.RUnlock()
+
+ return item.value
+}
+
+// TTL returns the TTL value of the item.
+func (item *Item[K, V]) TTL() time.Duration {
+ item.mu.RLock()
+ defer item.mu.RUnlock()
+
+ return item.ttl
+}
+
+// ExpiresAt returns the expiration timestamp of the item.
+func (item *Item[K, V]) ExpiresAt() time.Time {
+ item.mu.RLock()
+ defer item.mu.RUnlock()
+
+ return item.expiresAt
+}
diff --git a/item_test.go b/item_test.go
new file mode 100644
index 0000000..0eb306e
--- /dev/null
+++ b/item_test.go
@@ -0,0 +1,95 @@
+package ttlcache
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func Test_newItem(t *testing.T) {
+ item := newItem("key", 123, time.Hour)
+ require.NotNil(t, item)
+ assert.Equal(t, "key", item.key)
+ assert.Equal(t, 123, item.value)
+ assert.Equal(t, time.Hour, item.ttl)
+ assert.WithinDuration(t, time.Now().Add(time.Hour), item.expiresAt, time.Minute)
+}
+
+func Test_Item_update(t *testing.T) {
+ item := Item[string, string]{
+ expiresAt: time.Now().Add(-time.Hour),
+ value: "hello",
+ }
+
+ item.update("test", time.Hour)
+ assert.Equal(t, "test", item.value)
+ assert.Equal(t, time.Hour, item.ttl)
+ assert.WithinDuration(t, time.Now().Add(time.Hour), item.expiresAt, time.Minute)
+
+ item.update("hi", NoTTL)
+ assert.Equal(t, "hi", item.value)
+ assert.Equal(t, NoTTL, item.ttl)
+ assert.Zero(t, item.expiresAt)
+}
+
+func Test_Item_touch(t *testing.T) {
+ var item Item[string, string]
+ item.touch()
+ assert.Zero(t, item.expiresAt)
+
+ item.ttl = time.Hour
+ item.touch()
+ assert.WithinDuration(t, time.Now().Add(time.Hour), item.expiresAt, time.Minute)
+}
+
+func Test_Item_IsExpired(t *testing.T) {
+ // no ttl
+ item := Item[string, string]{
+ expiresAt: time.Now().Add(-time.Hour),
+ }
+
+ assert.False(t, item.IsExpired())
+
+ // expired
+ item.ttl = time.Hour
+ assert.True(t, item.IsExpired())
+
+ // not expired
+ item.expiresAt = time.Now().Add(time.Hour)
+ assert.False(t, item.IsExpired())
+}
+
+func Test_Item_Key(t *testing.T) {
+ item := Item[string, string]{
+ key: "test",
+ }
+
+ assert.Equal(t, "test", item.Key())
+}
+
+func Test_Item_Value(t *testing.T) {
+ item := Item[string, string]{
+ value: "test",
+ }
+
+ assert.Equal(t, "test", item.Value())
+}
+
+func Test_Item_TTL(t *testing.T) {
+ item := Item[string, string]{
+ ttl: time.Hour,
+ }
+
+ assert.Equal(t, time.Hour, item.TTL())
+}
+
+func Test_Item_ExpiresAt(t *testing.T) {
+ now := time.Now()
+ item := Item[string, string]{
+ expiresAt: now,
+ }
+
+ assert.Equal(t, now, item.ExpiresAt())
+}
diff --git a/metrics.go b/metrics.go
new file mode 100644
index 0000000..4bf8559
--- /dev/null
+++ b/metrics.go
@@ -0,0 +1,21 @@
+package ttlcache
+
+// Metrics contains common cache metrics calculated over the course
+// of the cache's lifetime.
+type Metrics struct {
+ // Insertions specifies how many items were inserted.
+ Insertions uint64
+
+ // Hits specifies how many items were successfully retrieved
+ // from the cache.
+ // Retrievals made with a loader function are not tracked.
+ Hits uint64
+
+ // Misses specifies how many items were not found in the cache.
+ // Retrievals made with a loader function are tracked as well.
+ Misses uint64
+
+ // Evictions specifies how many items were removed from the
+ // cache.
+ Evictions uint64
+}
diff --git a/options.go b/options.go
new file mode 100644
index 0000000..3acf88d
--- /dev/null
+++ b/options.go
@@ -0,0 +1,67 @@
+package ttlcache
+
+import "time"
+
+// Option sets a specific cache option.
+type Option[K comparable, V any] interface {
+ apply(opts *options[K, V])
+}
+
+// optionFunc wraps a function and implements the Option interface.
+type optionFunc[K comparable, V any] func(*options[K, V])
+
+// apply calls the wrapped function.
+func (fn optionFunc[K, V]) apply(opts *options[K, V]) {
+ fn(opts)
+}
+
+// options holds all available cache configuration options.
+type options[K comparable, V any] struct {
+ capacity uint64
+ ttl time.Duration
+ loader Loader[K, V]
+ disableTouchOnHit bool
+}
+
+// applyOptions applies the provided option values to the option struct.
+func applyOptions[K comparable, V any](v *options[K, V], opts ...Option[K, V]) {
+ for i := range opts {
+ opts[i].apply(v)
+ }
+}
+
+// WithCapacity sets the maximum capacity of the cache.
+// It has no effect when passing into Get().
+func WithCapacity[K comparable, V any](c uint64) Option[K, V] {
+ return optionFunc[K, V](func(opts *options[K, V]) {
+ opts.capacity = c
+ })
+}
+
+// WithTTL sets the TTL of the cache.
+// It has no effect when passing into Get().
+func WithTTL[K comparable, V any](ttl time.Duration) Option[K, V] {
+ return optionFunc[K, V](func(opts *options[K, V]) {
+ opts.ttl = ttl
+ })
+}
+
+// WithLoader sets the loader of the cache.
+// When passing into Get(), it sets an ephemeral loader that
+// is used instead of the cache's default one.
+func WithLoader[K comparable, V any](l Loader[K, V]) Option[K, V] {
+ return optionFunc[K, V](func(opts *options[K, V]) {
+ opts.loader = l
+ })
+}
+
+// WithDisableTouchOnHit prevents the cache instance from
+// extending/touching an item's expiration timestamp when it is being
+// retrieved.
+// When passing into Get(), it overrides the default value of the
+// cache.
+func WithDisableTouchOnHit[K comparable, V any]() Option[K, V] {
+ return optionFunc[K, V](func(opts *options[K, V]) {
+ opts.disableTouchOnHit = true
+ })
+}
diff --git a/options_test.go b/options_test.go
new file mode 100644
index 0000000..6b286c1
--- /dev/null
+++ b/options_test.go
@@ -0,0 +1,60 @@
+package ttlcache
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func Test_optionFunc_apply(t *testing.T) {
+ var called bool
+
+ optionFunc[string, string](func(_ *options[string, string]) {
+ called = true
+ }).apply(nil)
+ assert.True(t, called)
+}
+
+func Test_applyOptions(t *testing.T) {
+ var opts options[string, string]
+
+ applyOptions(&opts,
+ WithCapacity[string, string](12),
+ WithTTL[string, string](time.Hour),
+ )
+
+ assert.Equal(t, uint64(12), opts.capacity)
+ assert.Equal(t, time.Hour, opts.ttl)
+}
+
+func Test_WithCapacity(t *testing.T) {
+ var opts options[string, string]
+
+ WithCapacity[string, string](12).apply(&opts)
+ assert.Equal(t, uint64(12), opts.capacity)
+}
+
+func Test_WithTTL(t *testing.T) {
+ var opts options[string, string]
+
+ WithTTL[string, string](time.Hour).apply(&opts)
+ assert.Equal(t, time.Hour, opts.ttl)
+}
+
+func Test_WithLoader(t *testing.T) {
+ var opts options[string, string]
+
+ l := LoaderFunc[string, string](func(_ *Cache[string, string], _ string) *Item[string, string] {
+ return nil
+ })
+ WithLoader[string, string](l).apply(&opts)
+ assert.NotNil(t, opts.loader)
+}
+
+func Test_WithDisableTouchOnHit(t *testing.T) {
+ var opts options[string, string]
+
+ WithDisableTouchOnHit[string, string]().apply(&opts)
+ assert.True(t, opts.disableTouchOnHit)
+}