Switch to using TwinProduction/gocache for caching service status
vendor/github.com/TwinProduction/gocache/.gitattributes (generated, vendored, normal file, 1 line)
@@ -0,0 +1 @@
* text=lf
vendor/github.com/TwinProduction/gocache/.gitignore (generated, vendored, normal file, 1 line)
@@ -0,0 +1 @@
.idea
vendor/github.com/TwinProduction/gocache/Dockerfile (generated, vendored, normal file, 20 lines)
@@ -0,0 +1,20 @@
# Build the go application into a binary
FROM golang:alpine as builder
WORKDIR /app
ADD . ./
RUN CGO_ENABLED=0 GOOS=linux go build -mod vendor -a -installsuffix cgo -o bin/gocache-server ./gocacheserver/main
RUN apk --update add --no-cache ca-certificates

FROM scratch
ENV APP_HOME=/app
ENV APP_DATA=/app/data
ENV PORT=6379
ENV MAX_CACHE_SIZE=100000
ENV MAX_MEMORY_USAGE=0
ENV AUTOSAVE="false"
VOLUME ${APP_DATA}
WORKDIR ${APP_HOME}
COPY --from=builder /app/bin/gocache-server ./bin/gocache-server
COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
EXPOSE ${PORT}
ENTRYPOINT ["/app/bin/gocache-server"]
vendor/github.com/TwinProduction/gocache/LICENSE.md (generated, vendored, normal file, 9 lines)
@@ -0,0 +1,9 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2020 TwinProduction
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
vendor/github.com/TwinProduction/gocache/Makefile (generated, vendored, normal file, 24 lines)
@@ -0,0 +1,24 @@
db: docker-build
dr: docker-run
drmem: docker-run-max-memory-usage

docker-build:
	docker build --tag=gocache-server .

docker-run:
	docker run -p 6666:6379 -e AUTOSAVE=true -e MAX_CACHE_SIZE=0 --name gocache-server -d gocache-server

docker-run-max-memory-usage:
	docker run -p 6666:6379 -e AUTOSAVE=true -e MAX_CACHE_SIZE=0 -e MAX_MEMORY_USAGE=524288000 --name gocache-server -d gocache-server

run:
	PORT=6666 go run gocacheserver/main/server.go

start-redis:
	docker run -p 6379:6379 --name redis -d redis

redis-benchmark:
	redis-benchmark -p 6666 -t set,get -n 10000000 -r 200000 -q -P 512 -c 512

memtier-benchmark:
	memtier_benchmark --port 6666 --hide-histogram --key-maximum 100000 --ratio 1:1 --expiry-range 1-100 --key-pattern R:R --randomize -n 100000
vendor/github.com/TwinProduction/gocache/README.md (generated, vendored, normal file, 425 lines)
@@ -0,0 +1,425 @@
|
||||
# gocache
|
||||
|
||||

|
||||
[](https://goreportcard.com/report/github.com/TwinProduction/gocache)
|
||||
[](https://codecov.io/gh/TwinProduction/gocache)
|
||||
[](https://github.com/TwinProduction/gocache)
|
||||
[](https://godoc.org/github.com/TwinProduction/gocache)
|
||||
[](https://cloud.docker.com/repository/docker/twinproduction/gocache-server)
|
||||
|
||||
gocache is an easy-to-use, high-performance, lightweight and thread-safe (goroutine-safe) in-memory key-value cache
|
||||
with support for LRU and FIFO eviction policies as well as expiration, bulk operations and even persistence to file.
|
||||
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [Features](#features)
|
||||
- [Usage](#usage)
|
||||
- [Initializing the cache](#initializing-the-cache)
|
||||
- [Functions](#functions)
|
||||
- [Examples](#examples)
|
||||
- [Creating or updating an entry](#creating-or-updating-an-entry)
|
||||
- [Getting an entry](#getting-an-entry)
|
||||
- [Deleting an entry](#deleting-an-entry)
|
||||
- [Complex example](#complex-example)
|
||||
- [Persistence](#persistence)
|
||||
- [Limitations](#limitations)
|
||||
- [Eviction](#eviction)
|
||||
- [MaxSize](#maxsize)
|
||||
- [MaxMemoryUsage](#maxmemoryusage)
|
||||
- [Server](#server)
|
||||
- [Running the server with Docker](#running-the-server-with-docker)
|
||||
- [Performance](#performance)
|
||||
- [Summary](#summary)
|
||||
- [Results](#results)
|
||||
- [FAQ](#faq)
|
||||
- [Why does the memory usage not go down?](#why-does-the-memory-usage-not-go-down)
|
||||
|
||||
|
||||
## Features
|
||||
gocache supports the following cache eviction policies:
|
||||
- First in first out (FIFO)
|
||||
- Least recently used (LRU)
|
||||
|
||||
It also supports cache entry TTL, which is both active and passive. Active expiration means that if you attempt
|
||||
to retrieve a cache key that has already expired, it will delete it on the spot and the behavior will be as if
|
||||
the cache key didn't exist. As for passive expiration, there's a background task that will take care of deleting
|
||||
expired keys.
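A minimal sketch of that behaviour (the key name and the 50ms TTL are arbitrary):

```go
package main

import (
	"fmt"
	"time"

	"github.com/TwinProduction/gocache"
)

func main() {
	cache := gocache.NewCache()
	// Start the janitor so that expired entries also get cleaned up in the background (passive expiration)
	_ = cache.StartJanitor()
	defer cache.StopJanitor()

	cache.SetWithTTL("session", "token", 50*time.Millisecond)
	time.Sleep(100 * time.Millisecond)

	// The entry has expired: retrieving it deletes it on the spot and reports a miss (active expiration)
	_, exists := cache.Get("session")
	fmt.Println("exists after the TTL elapsed:", exists) // false
}
```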
|
||||
|
||||
It also includes what you'd expect from a cache, like bulk operations, persistence and pattern-based key retrieval.
|
||||
|
||||
While meant to be used as a library, there's a Redis-compatible cache server included.
|
||||
See the [Server](#server) section.
|
||||
It can also serve as a reference for integrating gocache into your own applications.
|
||||
|
||||
|
||||
## Usage
|
||||
```
|
||||
go get -u github.com/TwinProduction/gocache
|
||||
```
|
||||
|
||||
### Initializing the cache
|
||||
```go
|
||||
cache := gocache.NewCache().WithMaxSize(1000).WithEvictionPolicy(gocache.LeastRecentlyUsed)
|
||||
```
|
||||
|
||||
If you're planning on using expiration (`SetWithTTL` or `Expire`) and you want expired entries to be automatically deleted
|
||||
in the background, make sure to start the janitor when you instantiate the cache:
|
||||
|
||||
```go
|
||||
cache.StartJanitor()
|
||||
```
|
||||
|
||||
### Functions
|
||||
|
||||
| Function | Description |
|
||||
| ------------------ | ----------- |
|
||||
| WithMaxSize | Sets the max size of the cache. `gocache.NoMaxSize` means there is no limit. If not set, the default max size is `gocache.DefaultMaxSize`.
|
||||
| WithMaxMemoryUsage | Sets the max memory usage of the cache. `gocache.NoMaxMemoryUsage` means there is no limit. The default behavior is to not evict based on memory usage.
|
||||
| WithEvictionPolicy | Sets the eviction algorithm to be used when the cache reaches the max size. If not set, the default eviction policy is `gocache.FirstInFirstOut` (FIFO).
|
||||
| StartJanitor | Starts the janitor, which is in charge of deleting expired cache entries in the background.
|
||||
| StopJanitor | Stops the janitor.
|
||||
| Set | Same as `SetWithTTL`, but with no expiration (`gocache.NoExpiration`)
|
||||
| SetAll | Same as `Set`, but in bulk
|
||||
| SetWithTTL | Creates or updates a cache entry with the given key, value and expiration time. If the cache size after this operation exceeds the configured max size, the tail will be evicted. Depending on the eviction policy, the tail is either the oldest entry (FIFO) or the least recently used entry (LRU).
|
||||
| Get | Gets a cache entry by its key.
|
||||
| GetAll | Gets a map of entries by their keys. The resulting map will contain all keys, even if some of the keys in the slice passed as parameter were not present in the cache.
|
||||
| GetKeysByPattern | Retrieves a slice of keys that match a given pattern.
|
||||
| Delete | Removes a key from the cache.
|
||||
| DeleteAll | Removes multiple keys from the cache.
|
||||
| Count | Gets the size of the cache. This includes cache keys which may have already expired, but have not been removed yet.
|
||||
| Clear | Wipes the cache.
|
||||
| TTL | Gets the time until a cache key expires.
|
||||
| Expire | Sets the expiration time of an existing cache key.
|
||||
| SaveToFile | Stores the content of the cache to a file so that it can be read using `ReadFromFile`. See [persistence](#persistence).
|
||||
| ReadFromFile | Populates the cache using a file created using `SaveToFile`. See [persistence](#persistence).
|
||||
|
||||
|
||||
### Examples
|
||||
|
||||
#### Creating or updating an entry
|
||||
```go
|
||||
cache.Set("key", "value")
|
||||
cache.Set("key", 1)
|
||||
cache.Set("key", struct{ Text string }{Test: "value"})
|
||||
```
|
||||
|
||||
#### Getting an entry
|
||||
```go
|
||||
value, ok := cache.Get("key")
|
||||
```
|
||||
You can also get multiple entries by using `cache.GetAll([]string{"key1", "key2"})`
|
||||
|
||||
#### Deleting an entry
|
||||
```go
|
||||
cache.Delete("key")
|
||||
```
|
||||
You can also delete multiple entries by using `cache.DeleteAll([]string{"key1", "key2"})`
|
||||
|
||||
#### Complex example
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/TwinProduction/gocache"
|
||||
"time"
|
||||
)
|
||||
|
||||
func main() {
|
||||
cache := gocache.NewCache().WithEvictionPolicy(gocache.LeastRecentlyUsed).WithMaxSize(10000)
|
||||
cache.StartJanitor() // Passively manages expired entries
|
||||
|
||||
cache.Set("key", "value")
|
||||
cache.SetWithTTL("key-with-ttl", "value", 60*time.Minute)
|
||||
cache.SetAll(map[string]interface{}{"k1": "v1", "k2": "v2", "k3": "v3"})
|
||||
|
||||
value, exists := cache.Get("key")
|
||||
fmt.Printf("[Get] key=key; value=%s; exists=%v\n", value, exists)
|
||||
for key, value := range cache.GetAll([]string{"k1", "k2", "k3"}) {
|
||||
fmt.Printf("[GetAll] key=%s; value=%s\n", key, value)
|
||||
}
|
||||
for _, key := range cache.GetKeysByPattern("key*", 0) {
|
||||
fmt.Printf("[GetKeysByPattern] key=%s\n", key)
|
||||
}
|
||||
|
||||
fmt.Println("Cache size before persisting cache to file:", cache.Count())
|
||||
err := cache.SaveToFile("cache.bak")
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to persist cache to file: %s", err.Error()))
|
||||
}
|
||||
|
||||
cache.Expire("key", time.Hour)
|
||||
time.Sleep(500*time.Millisecond)
|
||||
timeUntilExpiration, _ := cache.TTL("key")
|
||||
fmt.Println("Number of minutes before 'key' expires:", int(timeUntilExpiration.Seconds()))
|
||||
|
||||
cache.Delete("key")
|
||||
cache.DeleteAll([]string{"k1", "k2", "k3"})
|
||||
|
||||
fmt.Println("Cache size before restoring cache from file:", cache.Count())
|
||||
_, err = cache.ReadFromFile("cache.bak")
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to restore cache from file: %s", err.Error()))
|
||||
}
|
||||
|
||||
fmt.Println("Cache size after restoring cache from file:", cache.Count())
|
||||
cache.Clear()
|
||||
fmt.Println("Cache size after clearing the cache:", cache.Count())
|
||||
}
|
||||
```
|
||||
|
||||
<details>
|
||||
<summary>Output</summary>
|
||||
|
||||
```
|
||||
[Get] key=key; value=value; exists=true
|
||||
[GetAll] key=k2; value=v2
|
||||
[GetAll] key=k3; value=v3
|
||||
[GetAll] key=k1; value=v1
|
||||
[GetKeysByPattern] key=key
|
||||
[GetKeysByPattern] key=key-with-ttl
|
||||
Cache size before persisting cache to file: 5
|
||||
Number of seconds before 'key' expires: 3599
|
||||
Cache size before restoring cache from file: 1
|
||||
Cache size after restoring cache from file: 5
|
||||
Cache size after clearing the cache: 0
|
||||
```
|
||||
</details>
|
||||
|
||||
|
||||
## Persistence
|
||||
While gocache is an in-memory cache, you can still save the content of the cache to a file
and restore it from that file later.
|
||||
|
||||
To save the content of the cache to a file:
|
||||
```go
|
||||
err := cache.SaveToFile(TestCacheFile)
|
||||
```
|
||||
|
||||
To retrieve the content of the cache from a file:
|
||||
```go
|
||||
numberOfEntriesEvicted, err := newCache.ReadFromFile(TestCacheFile)
|
||||
```
|
||||
The `numberOfEntriesEvicted` will be non-zero only if the number of entries
|
||||
in the file is higher than the cache's configured `MaxSize`.
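As a rough sketch of that behaviour (the file path and sizes are arbitrary; `gocache` and `fmt` are assumed to be imported), saving five entries and restoring them into a cache with a max size of three should report two evictions:

```go
cache := gocache.NewCache().WithMaxSize(gocache.NoMaxSize)
for i := 0; i < 5; i++ {
	cache.Set(fmt.Sprintf("key-%d", i), i)
}
if err := cache.SaveToFile("/tmp/gocache.bak"); err != nil {
	panic(err)
}

smallerCache := gocache.NewCache().WithMaxSize(3)
numberOfEntriesEvicted, err := smallerCache.ReadFromFile("/tmp/gocache.bak")
if err != nil {
	panic(err)
}
fmt.Println(numberOfEntriesEvicted) // 2, since only 3 of the 5 saved entries fit
```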
|
||||
|
||||
### Limitations
|
||||
While you can cache structs in memory out of the box, persisting structs to a file requires you to
|
||||
**register the custom interfaces that your application uses with the `gob` package**.
|
||||
|
||||
```go
|
||||
type YourCustomStruct struct {
|
||||
A string
|
||||
B int
|
||||
}
|
||||
|
||||
// ...
|
||||
cache.Set("key", YourCustomStruct{A: "test", B: 123})
|
||||
```
|
||||
To persist your custom struct properly:
|
||||
```go
|
||||
gob.Register(YourCustomStruct{})
|
||||
cache.SaveToFile("gocache.bak")
|
||||
```
|
||||
The same applies for restoring the cache from a file:
|
||||
```go
|
||||
cache := NewCache()
|
||||
gob.Register(YourCustomStruct{})
|
||||
cache.ReadFromFile(TestCacheFile)
|
||||
value, _ := cache.Get("key")
|
||||
fmt.Println(value.(YourCustomStruct))
|
||||
```
|
||||
You only need to register the struct once, so adding the following function to a file would suffice:
|
||||
```go
|
||||
func init() {
|
||||
gob.Register(YourCustomStruct{})
|
||||
}
|
||||
```
|
||||
|
||||
Failure to register your custom structs will prevent gocache from persisting and/or parsing the value of each key that
uses said custom structs.
|
||||
|
||||
That being said, assuming that you're using gocache as a cache, this shouldn't cause any bugs on your end, because
keys that cannot be parsed are simply not populated into the cache by `ReadFromFile`.
|
||||
|
||||
In other words, if you're falling back to a database or something similar when the cache doesn't have the key requested,
|
||||
you'll be fine.
|
||||
|
||||
|
||||
## Eviction
|
||||
### MaxSize
|
||||
Eviction by MaxSize is the default behavior, and is also the most efficient.
|
||||
|
||||
The code below will create a cache that has a maximum size of 1000:
|
||||
```go
|
||||
cache := gocache.NewCache().WithMaxSize(1000)
|
||||
```
|
||||
This means that whenever an operation causes the total size of the cache to go above 1000, the tail will be evicted.
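For example, a small sketch of that behaviour (key names are arbitrary; `gocache` and `fmt` are assumed to be imported):

```go
cache := gocache.NewCache().WithMaxSize(3)
for i := 0; i < 5; i++ {
	cache.Set(fmt.Sprintf("key-%d", i), i)
}
// With the default FIFO policy, key-0 and key-1 were evicted as the tail
fmt.Println(cache.Count()) // 3
```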
|
||||
|
||||
### MaxMemoryUsage
|
||||
Eviction by MaxMemoryUsage is **disabled by default**, and is still a work in progress.
|
||||
|
||||
The code below will create a cache that has a maximum memory usage of 50MB:
|
||||
```go
|
||||
cache := gocache.NewCache().WithMaxSize(0).WithMaxMemoryUsage(50*gocache.Megabyte)
|
||||
```
|
||||
This means that whenever an operation causes the total memory usage of the cache to go above 50MB, one or more tails
|
||||
will be evicted.
|
||||
|
||||
Unlike evictions caused by reaching the MaxSize, evictions triggered by MaxMemoryUsage may lead to multiple entries
|
||||
being evicted in a row. The reason for this is that if, for instance, you had 500 entries of 0.1MB each and you suddenly added
|
||||
a single entry of 10MB, 100 entries would need to be evicted to make enough space for that new big entry.
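A rough sketch of that scenario using the `MemoryUsage` and `Stats` accessors (the printed numbers will vary since the measurement is approximate; `gocache` and `fmt` are assumed to be imported):

```go
cache := gocache.NewCache().WithMaxSize(gocache.NoMaxSize).WithMaxMemoryUsage(50 * gocache.Megabyte)
for i := 0; i < 500; i++ {
	// ~0.1MB per entry
	cache.Set(fmt.Sprintf("small-%d", i), make([]byte, 100*gocache.Kilobyte))
}
// A single large entry may force several evictions in a row
cache.Set("big", make([]byte, 10*gocache.Megabyte))
fmt.Println(cache.Count(), cache.MemoryUsage(), cache.Stats().EvictedKeys)
```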
|
||||
|
||||
It's very important to keep in mind that eviction by MaxMemoryUsage is approximate.
|
||||
|
||||
**The only memory taken into consideration is the size of the cache, not the size of the entire application.**
|
||||
If you pass along 100MB worth of data in a matter of seconds, even though the cache's memory usage will remain
|
||||
under 50MB (or whatever you configure the MaxMemoryUsage to), the memory footprint generated by that 100MB will
|
||||
still exist until the next GC cycle.
|
||||
|
||||
As previously mentioned, this is a work in progress, and here's a list of the things you should keep in mind:
|
||||
- The memory usage of structs is a rough estimation and may not reflect the actual memory usage.
|
||||
- Native types (string, int, bool, []byte, etc.) are the most accurate for calculating the memory usage.
|
||||
- Adding an entry bigger than the configured MaxMemoryUsage will work, but it will evict all other entries.
|
||||
|
||||
|
||||
## Server
|
||||
For the sake of convenience, a ready-to-go cache server is available
|
||||
through the `gocacheserver` package.
|
||||
|
||||
The server lives in a separate package because `gocache` itself does not use
any external dependencies; rather than re-inventing the wheel, the server
implementation uses redcon, a Redis server framework for Go.
|
||||
|
||||
That way, those who desire to use gocache without the server will not add any extra dependencies
|
||||
as long as they don't import the `gocacheserver` package.
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/TwinProduction/gocache"
|
||||
"github.com/TwinProduction/gocache/gocacheserver"
|
||||
)
|
||||
|
||||
func main() {
|
||||
cache := gocache.NewCache().WithEvictionPolicy(gocache.LeastRecentlyUsed).WithMaxSize(100000)
|
||||
server := gocacheserver.NewServer(cache)
|
||||
server.Start()
|
||||
}
|
||||
```
|
||||
|
||||
Any Redis client should be able to interact with the server (see the sketch after the list below), though only the following commands are supported:
|
||||
- [X] GET
|
||||
- [X] SET
|
||||
- [X] DEL
|
||||
- [X] PING
|
||||
- [X] QUIT
|
||||
- [X] INFO
|
||||
- [X] EXPIRE
|
||||
- [X] SETEX
|
||||
- [X] TTL
|
||||
- [X] FLUSHDB
|
||||
- [X] EXISTS
|
||||
- [X] ECHO
|
||||
- [X] MGET
|
||||
- [X] MSET
|
||||
- [X] SCAN (kind of - cursor is not currently supported)
|
||||
- [ ] KEYS
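For instance, a minimal sketch using the go-redis client (already listed in this module's `go.mod`), assuming the server from the previous snippet is listening on `localhost:6379`:

```go
package main

import (
	"fmt"

	"github.com/go-redis/redis"
)

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	// Only the commands listed above are supported
	if err := client.Set("key", "value", 0).Err(); err != nil {
		panic(err)
	}
	value, err := client.Get("key").Result()
	if err != nil {
		panic(err)
	}
	fmt.Println(value) // value
}
```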
|
||||
|
||||
|
||||
## Running the server with Docker
|
||||
To build it locally, refer to the Makefile's `docker-build` and `docker-run` steps.
|
||||
|
||||
Note that the server version of gocache is still under development.
|
||||
|
||||
```
|
||||
docker run --name gocache-server -p 6379:6379 twinproduction/gocache-server:v0.1.0
|
||||
```
|
||||
|
||||
|
||||
## Performance
|
||||
|
||||
### Summary
|
||||
- **Set**: Both map and gocache have the same performance.
|
||||
- **Get**: Map is faster than gocache.
|
||||
|
||||
This is because gocache keeps track of the head and the tail for eviction and expiration/TTL.
|
||||
|
||||
Ultimately, the difference is negligible.
|
||||
|
||||
We could add a way to disable eviction or disable expiration altogether just to match the map's performance,
|
||||
but if you're looking into using a library like gocache, odds are, you want more than just a map.
|
||||
|
||||
|
||||
### Results
|
||||
| key | value |
|
||||
|:------ |:-------- |
|
||||
| goos | windows |
|
||||
| goarch | amd64 |
|
||||
| cpu | i7-9700K |
|
||||
| mem | 32G DDR4 |
|
||||
|
||||
```
|
||||
BenchmarkMap_Get-8 47943618 26.6 ns/op
|
||||
BenchmarkMap_SetSmallValue-8 3800810 394 ns/op
|
||||
BenchmarkMap_SetMediumValue-8 3904794 400 ns/op
|
||||
BenchmarkMap_SetLargeValue-8 3934033 383 ns/op
|
||||
BenchmarkCache_Get-8 27254640 45.0 ns/op
|
||||
BenchmarkCache_SetSmallValue-8 2991620 401 ns/op
|
||||
BenchmarkCache_SetMediumValue-8 3051128 381 ns/op
|
||||
BenchmarkCache_SetLargeValue-8 2995904 382 ns/op
|
||||
BenchmarkCache_SetSmallValueWhenUsingMaxMemoryUsage-8 2752288 428 ns/op
|
||||
BenchmarkCache_SetMediumValueWhenUsingMaxMemoryUsage-8 2744899 436 ns/op
|
||||
BenchmarkCache_SetLargeValueWhenUsingMaxMemoryUsage-8 2756816 430 ns/op
|
||||
BenchmarkCache_SetSmallValueWithMaxSize10-8 5308886 226 ns/op
|
||||
BenchmarkCache_SetMediumValueWithMaxSize10-8 5304098 226 ns/op
|
||||
BenchmarkCache_SetLargeValueWithMaxSize10-8 5277986 227 ns/op
|
||||
BenchmarkCache_SetSmallValueWithMaxSize1000-8 5130580 236 ns/op
|
||||
BenchmarkCache_SetMediumValueWithMaxSize1000-8 5102404 237 ns/op
|
||||
BenchmarkCache_SetLargeValueWithMaxSize1000-8 5084695 237 ns/op
|
||||
BenchmarkCache_SetSmallValueWithMaxSize100000-8 3858066 315 ns/op
|
||||
BenchmarkCache_SetMediumValueWithMaxSize100000-8 3909277 315 ns/op
|
||||
BenchmarkCache_SetLargeValueWithMaxSize100000-8 3870913 315 ns/op
|
||||
BenchmarkCache_SetSmallValueWithMaxSize100000AndLRU-8 3856012 316 ns/op
|
||||
BenchmarkCache_SetMediumValueWithMaxSize100000AndLRU-8 3809518 316 ns/op
|
||||
BenchmarkCache_SetLargeValueWithMaxSize100000AndLRU-8 3834754 318 ns/op
|
||||
BenchmarkCache_GetAndSetConcurrently-8 1779258 672 ns/op
|
||||
BenchmarkCache_GetAndSetConcurrentlyWithRandomKeysAndLRU-8 2569590 487 ns/op
|
||||
BenchmarkCache_GetAndSetConcurrentlyWithRandomKeysAndFIFO-8 2608369 474 ns/op
|
||||
BenchmarkCache_GetAndSetConcurrentlyWithRandomKeysAndNoEvictionAndLRU-8 2185795 582 ns/op
|
||||
BenchmarkCache_GetAndSetConcurrentlyWithRandomKeysAndNoEvictionAndFIFO-8 2238811 568 ns/op
|
||||
BenchmarkCache_GetAndSetConcurrentlyWithFrequentEvictionsAndLRU-8 3726714 320 ns/op
|
||||
BenchmarkCache_GetAndSetConcurrentlyWithFrequentEvictionsAndFIFO-8 3682808 325 ns/op
|
||||
BenchmarkCache_GetConcurrentlyWithLRU-8 1536589 739 ns/op
|
||||
BenchmarkCache_GetConcurrentlyWithFIFO-8 1558513 737 ns/op
|
||||
BenchmarkCache_GetKeysThatDoNotExistConcurrently-8 10173138 119 ns/op
|
||||
```
|
||||
|
||||
|
||||
## FAQ
|
||||
|
||||
### Why does the memory usage not go down?
|
||||
By default, Go releases memory using `MADV_FREE` when the kernel supports it, which is significantly more efficient
than `MADV_DONTNEED`. Unfortunately, this means that the RSS doesn't go down unless the OS actually needs the
memory.
|
||||
|
||||
Technically, the memory _is_ available to the kernel, even if it shows a high memory usage, but the OS will only
|
||||
use that memory if it needs to. In the case that the OS does need the freed memory, the RSS will go down and you'll
|
||||
notice the memory usage lowering.
|
||||
|
||||
[reference](https://github.com/golang/go/issues/33376#issuecomment-666455792)
|
||||
|
||||
You can reproduce this by following the steps below:
|
||||
- Start gocacheserver
|
||||
- Note the memory usage
|
||||
- Create 500k keys
|
||||
- Note the memory usage
|
||||
- Flush the cache
|
||||
- Note that the memory usage has not decreased, despite the cache being empty.
|
||||
|
||||
**Substituting gocache for a normal map will yield the same result.**
|
||||
|
||||
If released memory still showing up as used is a problem for you,
you can set the environment variable `GODEBUG` to `madvdontneed=1`.
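For example, reusing the Docker image from the section above: `docker run -e GODEBUG=madvdontneed=1 -p 6379:6379 twinproduction/gocache-server:v0.1.0`.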
|
vendor/github.com/TwinProduction/gocache/entry.go (generated, vendored, normal file, 101 lines)
@@ -0,0 +1,101 @@
|
||||
package gocache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
type Entry struct {
|
||||
Key string
|
||||
Value interface{}
|
||||
|
||||
// RelevantTimestamp is the variable used to store either:
|
||||
// - creation timestamp, if the Cache's EvictionPolicy is FirstInFirstOut
|
||||
// - last access timestamp, if the Cache's EvictionPolicy is LeastRecentlyUsed
|
||||
//
|
||||
// Note that updating an existing entry will also update this value
|
||||
RelevantTimestamp time.Time
|
||||
|
||||
// Expiration is the unix time in nanoseconds at which the entry will expire (-1 means no expiration)
|
||||
Expiration int64
|
||||
|
||||
next *Entry
|
||||
previous *Entry
|
||||
}
|
||||
|
||||
func (entry *Entry) Accessed() {
|
||||
entry.RelevantTimestamp = time.Now()
|
||||
}
|
||||
|
||||
func (entry Entry) Expired() bool {
|
||||
if entry.Expiration > 0 {
|
||||
if time.Now().UnixNano() > entry.Expiration {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (entry *Entry) SizeInBytes() int {
|
||||
return toBytes(entry.Key) + toBytes(entry.Value) + 32
|
||||
}
|
||||
|
||||
func toBytes(value interface{}) int {
|
||||
switch value.(type) {
|
||||
case string:
|
||||
return int(unsafe.Sizeof(value)) + len(value.(string))
|
||||
case int8, uint8, bool:
|
||||
return int(unsafe.Sizeof(value)) + 1
|
||||
case int16, uint16:
|
||||
return int(unsafe.Sizeof(value)) + 2
|
||||
case int32, uint32, float32, complex64:
|
||||
return int(unsafe.Sizeof(value)) + 4
|
||||
case int64, uint64, int, uint, float64, complex128:
|
||||
return int(unsafe.Sizeof(value)) + 8
|
||||
case []interface{}:
|
||||
size := 0
|
||||
for _, v := range value.([]interface{}) {
|
||||
size += toBytes(v)
|
||||
}
|
||||
return int(unsafe.Sizeof(value)) + size
|
||||
case []string:
|
||||
size := 0
|
||||
for _, v := range value.([]string) {
|
||||
size += toBytes(v)
|
||||
}
|
||||
return int(unsafe.Sizeof(value)) + size
|
||||
case []int8:
|
||||
return int(unsafe.Sizeof(value)) + len(value.([]int8))
|
||||
case []uint8:
|
||||
return int(unsafe.Sizeof(value)) + len(value.([]uint8))
|
||||
case []bool:
|
||||
return int(unsafe.Sizeof(value)) + len(value.([]bool))
|
||||
case []int16:
|
||||
return int(unsafe.Sizeof(value)) + (len(value.([]int16)) * 2)
|
||||
case []uint16:
|
||||
return int(unsafe.Sizeof(value)) + (len(value.([]uint16)) * 2)
|
||||
case []int32:
|
||||
return int(unsafe.Sizeof(value)) + (len(value.([]int32)) * 4)
|
||||
case []uint32:
|
||||
return int(unsafe.Sizeof(value)) + (len(value.([]uint32)) * 4)
|
||||
case []float32:
|
||||
return int(unsafe.Sizeof(value)) + (len(value.([]float32)) * 4)
|
||||
case []complex64:
|
||||
return int(unsafe.Sizeof(value)) + (len(value.([]complex64)) * 4)
|
||||
case []int64:
|
||||
return int(unsafe.Sizeof(value)) + (len(value.([]int64)) * 8)
|
||||
case []uint64:
|
||||
return int(unsafe.Sizeof(value)) + (len(value.([]uint64)) * 8)
|
||||
case []int:
|
||||
return int(unsafe.Sizeof(value)) + (len(value.([]int)) * 8)
|
||||
case []uint:
|
||||
return int(unsafe.Sizeof(value)) + (len(value.([]uint)) * 8)
|
||||
case []float64:
|
||||
return int(unsafe.Sizeof(value)) + (len(value.([]float64)) * 8)
|
||||
case []complex128:
|
||||
return int(unsafe.Sizeof(value)) + (len(value.([]complex128)) * 8)
|
||||
default:
|
||||
return int(unsafe.Sizeof(value)) + len(fmt.Sprintf("%v", value))
|
||||
}
|
||||
}
|
vendor/github.com/TwinProduction/gocache/go.mod (generated, vendored, normal file, 11 lines)
@@ -0,0 +1,11 @@
module github.com/TwinProduction/gocache

go 1.15

require (
	github.com/boltdb/bolt v1.3.1
	github.com/go-redis/redis v6.15.9+incompatible
	github.com/onsi/ginkgo v1.14.1 // indirect
	github.com/onsi/gomega v1.10.2 // indirect
	github.com/tidwall/redcon v1.3.2
)
vendor/github.com/TwinProduction/gocache/go.sum (generated, vendored, normal file, 66 lines)
@@ -0,0 +1,66 @@
|
||||
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
|
||||
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg=
|
||||
github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||
github.com/onsi/ginkgo v1.14.1 h1:jMU0WaQrP0a/YAEq8eJmJKjBoMs+pClEr1vDMlM/Do4=
|
||||
github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/onsi/gomega v1.10.2 h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs=
|
||||
github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/tidwall/redcon v1.3.2 h1:8INx/Nm3VSUbDUT16TH1rMgYQsbXNqy9xcX70edHXbo=
|
||||
github.com/tidwall/redcon v1.3.2/go.mod h1:bdYBm4rlcWpst2XMwKVzWDF9CoUxEbUmM7CQrKeOZas=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 h1:AeiKBIuRw3UomYXSbLy0Mc2dDLfdtbT/IVn4keq83P0=
|
||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 h1:DYfZAGf2WMFjMxbgTjaC+2HC7NkNAQs+6Q8b9WEB/F4=
|
||||
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
vendor/github.com/TwinProduction/gocache/gocache.go (generated, vendored, normal file, 446 lines)
@@ -0,0 +1,446 @@
|
||||
package gocache
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
Debug = false
|
||||
|
||||
// NoMaxSize means that the cache has no maximum number of entries
|
||||
// Setting Cache.maxSize to this value also means there will be no eviction
|
||||
NoMaxSize = 0
|
||||
|
||||
// NoMaxMemoryUsage means that the cache has no maximum memory usage, i.e. eviction based on memory usage is disabled
|
||||
NoMaxMemoryUsage = 0
|
||||
|
||||
// DefaultMaxSize is the max size set if no max size is specified
|
||||
DefaultMaxSize = 100000
|
||||
|
||||
// NoExpiration is the value that must be used as TTL to specify that the given key should never expire
|
||||
NoExpiration = -1
|
||||
|
||||
Kilobyte = 1024
|
||||
Megabyte = 1024 * 1024
|
||||
Gigabyte = 1024 * 1024 * 1024
|
||||
)
|
||||
|
||||
var (
|
||||
ErrKeyDoesNotExist = errors.New("key does not exist")
|
||||
ErrKeyHasNoExpiration = errors.New("key has no expiration")
|
||||
ErrJanitorAlreadyRunning = errors.New("janitor is already running")
|
||||
ErrAutoSaveAlreadyRunning = errors.New("autosave is already running")
|
||||
)
|
||||
|
||||
// Cache is the core struct of gocache which contains the data as well as all relevant configuration fields
|
||||
type Cache struct {
|
||||
// maxSize is the maximum amount of entries that can be in the cache at any given time
|
||||
// By default, this is set to DefaultMaxSize
|
||||
maxSize int
|
||||
|
||||
// maxMemoryUsage is the maximum amount of memory that can be taken up by the cache at any time
|
||||
// By default, this is set to NoMaxMemoryUsage, meaning that the default behavior is to not evict
|
||||
// based on maximum memory usage
|
||||
maxMemoryUsage int
|
||||
|
||||
// evictionPolicy is the eviction policy
|
||||
evictionPolicy EvictionPolicy
|
||||
|
||||
// stats is the object that contains cache statistics/metrics
|
||||
stats *Statistics
|
||||
|
||||
// entries is the content of the cache
|
||||
entries map[string]*Entry
|
||||
|
||||
// mutex is the lock for making concurrent operations on the cache
|
||||
mutex sync.RWMutex
|
||||
|
||||
// head is the cache entry at the head of the cache
|
||||
head *Entry
|
||||
|
||||
// tail is the last cache node and also the next entry that will be evicted
|
||||
tail *Entry
|
||||
|
||||
// stopJanitor is the channel used to stop the janitor
|
||||
stopJanitor chan bool
|
||||
|
||||
// memoryUsage is the approximate memory usage of the cache (dataset only) in bytes
|
||||
memoryUsage int
|
||||
}
|
||||
|
||||
// MaxSize returns the maximum amount of keys that can be present in the cache before
|
||||
// new entries trigger the eviction of the tail
|
||||
func (cache *Cache) MaxSize() int {
|
||||
return cache.maxSize
|
||||
}
|
||||
|
||||
// MaxMemoryUsage returns the configured maxMemoryUsage of the cache
|
||||
func (cache *Cache) MaxMemoryUsage() int {
|
||||
return cache.maxMemoryUsage
|
||||
}
|
||||
|
||||
// EvictionPolicy returns the EvictionPolicy of the Cache
|
||||
func (cache *Cache) EvictionPolicy() EvictionPolicy {
|
||||
return cache.evictionPolicy
|
||||
}
|
||||
|
||||
// Stats returns statistics from the cache
|
||||
func (cache *Cache) Stats() *Statistics {
|
||||
return cache.stats
|
||||
}
|
||||
|
||||
// MemoryUsage returns the current memory usage of the cache's dataset in bytes
|
||||
// If MaxMemoryUsage is set to NoMaxMemoryUsage, this will return 0
|
||||
func (cache *Cache) MemoryUsage() int {
|
||||
return cache.memoryUsage
|
||||
}
|
||||
|
||||
// WithMaxSize sets the maximum amount of entries that can be in the cache at any given time
|
||||
// A maxSize of 0 or less means infinite
|
||||
func (cache *Cache) WithMaxSize(maxSize int) *Cache {
|
||||
if maxSize < 0 {
|
||||
maxSize = NoMaxSize
|
||||
}
|
||||
cache.maxSize = maxSize
|
||||
return cache
|
||||
}
|
||||
|
||||
// WithMaxMemoryUsage sets the maximum amount of memory that can be used by the cache at any given time
|
||||
//
|
||||
// NOTE: This is approximate.
|
||||
//
|
||||
// Setting this to NoMaxMemoryUsage will disable eviction by memory usage
|
||||
func (cache *Cache) WithMaxMemoryUsage(maxMemoryUsageInBytes int) *Cache {
|
||||
if maxMemoryUsageInBytes < 0 {
|
||||
maxMemoryUsageInBytes = NoMaxMemoryUsage
|
||||
}
|
||||
cache.maxMemoryUsage = maxMemoryUsageInBytes
|
||||
return cache
|
||||
}
|
||||
|
||||
// WithEvictionPolicy sets eviction algorithm.
|
||||
// Defaults to FirstInFirstOut (FIFO)
|
||||
func (cache *Cache) WithEvictionPolicy(policy EvictionPolicy) *Cache {
|
||||
cache.evictionPolicy = policy
|
||||
return cache
|
||||
}
|
||||
|
||||
// NewCache creates a new Cache
|
||||
//
|
||||
// Should be used in conjunction with Cache.WithMaxSize, Cache.WithMaxMemoryUsage and/or Cache.WithEvictionPolicy
|
||||
//
|
||||
// gocache.NewCache().WithMaxSize(10000).WithEvictionPolicy(gocache.LeastRecentlyUsed)
|
||||
//
|
||||
func NewCache() *Cache {
|
||||
return &Cache{
|
||||
maxSize: DefaultMaxSize,
|
||||
evictionPolicy: FirstInFirstOut,
|
||||
stats: &Statistics{},
|
||||
entries: make(map[string]*Entry),
|
||||
mutex: sync.RWMutex{},
|
||||
stopJanitor: nil,
|
||||
}
|
||||
}
|
||||
|
||||
// Set creates or updates a key with a given value
|
||||
func (cache *Cache) Set(key string, value interface{}) {
|
||||
cache.SetWithTTL(key, value, NoExpiration)
|
||||
}
|
||||
|
||||
// SetWithTTL creates or updates a key with a given value and sets an expiration time (-1 is NoExpiration)
|
||||
func (cache *Cache) SetWithTTL(key string, value interface{}, ttl time.Duration) {
|
||||
cache.mutex.Lock()
|
||||
entry, ok := cache.get(key)
|
||||
if !ok {
|
||||
// A negative TTL that isn't -1 (NoExpiration) is an entry that will expire instantly,
|
||||
// so might as well just not create it in the first place
|
||||
if ttl != NoExpiration && ttl < 0 {
|
||||
cache.mutex.Unlock()
|
||||
return
|
||||
}
|
||||
// Cache entry doesn't exist, so we have to create a new one
|
||||
entry = &Entry{
|
||||
Key: key,
|
||||
Value: value,
|
||||
RelevantTimestamp: time.Now(),
|
||||
previous: cache.head,
|
||||
}
|
||||
if cache.head == nil {
|
||||
cache.tail = entry
|
||||
} else {
|
||||
cache.head.next = entry
|
||||
}
|
||||
cache.head = entry
|
||||
cache.entries[key] = entry
|
||||
if cache.maxMemoryUsage != NoMaxMemoryUsage {
|
||||
cache.memoryUsage += entry.SizeInBytes()
|
||||
}
|
||||
} else {
|
||||
if cache.maxMemoryUsage != NoMaxMemoryUsage {
|
||||
// Subtract the old entry from the cache's memoryUsage
|
||||
cache.memoryUsage -= entry.SizeInBytes()
|
||||
}
|
||||
// Update existing entry's value
|
||||
entry.Value = value
|
||||
entry.RelevantTimestamp = time.Now()
|
||||
if cache.maxMemoryUsage != NoMaxMemoryUsage {
|
||||
// Add the memory usage of the new entry to the cache's memoryUsage
|
||||
cache.memoryUsage += entry.SizeInBytes()
|
||||
}
|
||||
// Because we just updated the entry, we need to move it back to HEAD
|
||||
cache.moveExistingEntryToHead(entry)
|
||||
}
|
||||
if ttl != NoExpiration {
|
||||
entry.Expiration = time.Now().Add(ttl).UnixNano()
|
||||
} else {
|
||||
entry.Expiration = NoExpiration
|
||||
}
|
||||
// If the cache doesn't have a maxSize/maxMemoryUsage, then there's no point checking if we need to evict
|
||||
// an entry, so we'll just return now
|
||||
if cache.maxSize == NoMaxSize && cache.maxMemoryUsage == NoMaxMemoryUsage {
|
||||
cache.mutex.Unlock()
|
||||
return
|
||||
}
|
||||
// If there's a maxSize and the cache has more entries than the maxSize, evict
|
||||
if cache.maxSize != NoMaxSize && len(cache.entries) > cache.maxSize {
|
||||
cache.evict()
|
||||
}
|
||||
// If there's a maxMemoryUsage and the memoryUsage is above the maxMemoryUsage, evict
|
||||
if cache.maxMemoryUsage != NoMaxMemoryUsage && cache.memoryUsage > cache.maxMemoryUsage {
|
||||
for cache.memoryUsage > cache.maxMemoryUsage && len(cache.entries) > 0 {
|
||||
cache.evict()
|
||||
}
|
||||
}
|
||||
cache.mutex.Unlock()
|
||||
}
|
||||
|
||||
// SetAll creates or updates multiple values
|
||||
func (cache *Cache) SetAll(entries map[string]interface{}) {
|
||||
for key, value := range entries {
|
||||
cache.SetWithTTL(key, value, NoExpiration)
|
||||
}
|
||||
}
|
||||
|
||||
// Get retrieves an entry using the key passed as parameter
|
||||
// If there is no such entry, the value returned will be nil and the boolean will be false
|
||||
// If there is an entry, the value returned will be the value cached and the boolean will be true
|
||||
func (cache *Cache) Get(key string) (interface{}, bool) {
|
||||
cache.mutex.Lock()
|
||||
entry, ok := cache.get(key)
|
||||
if !ok {
|
||||
cache.mutex.Unlock()
|
||||
cache.stats.Misses++
|
||||
return nil, false
|
||||
}
|
||||
cache.stats.Hits++
|
||||
if entry.Expired() {
|
||||
cache.delete(key)
|
||||
cache.mutex.Unlock()
|
||||
return nil, false
|
||||
}
|
||||
if cache.evictionPolicy == LeastRecentlyUsed {
|
||||
entry.Accessed()
|
||||
if cache.head == entry {
|
||||
cache.mutex.Unlock()
|
||||
return entry.Value, true
|
||||
}
|
||||
// Because the eviction policy is LRU, we need to move the entry back to HEAD
|
||||
cache.moveExistingEntryToHead(entry)
|
||||
}
|
||||
cache.mutex.Unlock()
|
||||
return entry.Value, true
|
||||
}
|
||||
|
||||
// GetAll retrieves multiple entries using the keys passed as parameter
|
||||
// All keys are returned in the map, regardless of whether they exist or not,
|
||||
// however, entries that do not exist in the cache will return nil, meaning that
|
||||
// there is no way of determining whether a key genuinely has the value nil, or
|
||||
// whether it doesn't exist in the cache using only this function
|
||||
func (cache *Cache) GetAll(keys []string) map[string]interface{} {
|
||||
entries := make(map[string]interface{})
|
||||
for _, key := range keys {
|
||||
entries[key], _ = cache.Get(key)
|
||||
}
|
||||
return entries
|
||||
}
|
||||
|
||||
// GetKeysByPattern retrieves a slice of keys that match a given pattern
|
||||
// If the limit is set to 0, the entire cache will be searched for matching keys.
|
||||
// If the limit is above 0, the search will stop once the specified number of matching keys have been found.
|
||||
//
|
||||
// e.g. cache.GetKeysByPattern("*some*", 0) will return all keys containing "some" in them
|
||||
// e.g. cache.GetKeysByPattern("*some*", 5) will return 5 keys (or less) containing "some" in them
|
||||
//
|
||||
// Note that GetKeysByPattern does not trigger evictions, nor does it count as accessing the entry.
|
||||
func (cache *Cache) GetKeysByPattern(pattern string, limit int) []string {
|
||||
var matchingKeys []string
|
||||
cache.mutex.RLock()
|
||||
for key := range cache.entries {
|
||||
if MatchPattern(pattern, key) {
|
||||
matchingKeys = append(matchingKeys, key)
|
||||
if limit > 0 && len(matchingKeys) >= limit {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
cache.mutex.RUnlock()
|
||||
return matchingKeys
|
||||
}
|
||||
|
||||
// Delete removes a key from the cache
|
||||
//
|
||||
// Returns false if the key did not exist.
|
||||
func (cache *Cache) Delete(key string) bool {
|
||||
cache.mutex.Lock()
|
||||
ok := cache.delete(key)
|
||||
cache.mutex.Unlock()
|
||||
return ok
|
||||
}
|
||||
|
||||
// DeleteAll deletes multiple entries based on the keys passed as parameter
|
||||
//
|
||||
// Returns the number of keys deleted
|
||||
func (cache *Cache) DeleteAll(keys []string) int {
|
||||
numberOfKeysDeleted := 0
|
||||
cache.mutex.Lock()
|
||||
for _, key := range keys {
|
||||
if cache.delete(key) {
|
||||
numberOfKeysDeleted++
|
||||
}
|
||||
}
|
||||
cache.mutex.Unlock()
|
||||
return numberOfKeysDeleted
|
||||
}
|
||||
|
||||
// Count returns the total amount of entries in the cache, regardless of whether they're expired or not
|
||||
func (cache *Cache) Count() int {
|
||||
cache.mutex.RLock()
|
||||
count := len(cache.entries)
|
||||
cache.mutex.RUnlock()
|
||||
return count
|
||||
}
|
||||
|
||||
// Clear deletes all entries from the cache
|
||||
func (cache *Cache) Clear() {
|
||||
cache.mutex.Lock()
|
||||
cache.entries = make(map[string]*Entry)
|
||||
cache.memoryUsage = 0
|
||||
cache.head = nil
|
||||
cache.tail = nil
|
||||
cache.mutex.Unlock()
|
||||
}
|
||||
|
||||
// TTL returns the time until the cache entry specified by the key passed as parameter
|
||||
// will be deleted.
|
||||
func (cache *Cache) TTL(key string) (time.Duration, error) {
|
||||
cache.mutex.RLock()
|
||||
entry, ok := cache.get(key)
|
||||
cache.mutex.RUnlock()
|
||||
if !ok {
|
||||
return 0, ErrKeyDoesNotExist
|
||||
}
|
||||
if entry.Expiration == NoExpiration {
|
||||
return 0, ErrKeyHasNoExpiration
|
||||
}
|
||||
timeUntilExpiration := time.Until(time.Unix(0, entry.Expiration))
|
||||
if timeUntilExpiration < 0 {
|
||||
// The key has already expired but hasn't been deleted yet.
|
||||
// From the client's perspective, this means that the cache entry doesn't exist
|
||||
return 0, ErrKeyDoesNotExist
|
||||
}
|
||||
return timeUntilExpiration, nil
|
||||
}
|
||||
|
||||
// Expire sets a key's expiration time
|
||||
//
|
||||
// A TTL of -1 means that the key will never expire
|
||||
// A TTL of 0 means that the key will expire immediately
|
||||
// If using LRU, note that this does not reset the position of the key
|
||||
//
|
||||
// Returns true if the cache key exists and has had its expiration time altered
|
||||
func (cache *Cache) Expire(key string, ttl time.Duration) bool {
|
||||
entry, ok := cache.get(key)
|
||||
if !ok || entry.Expired() {
|
||||
return false
|
||||
}
|
||||
if ttl != NoExpiration {
|
||||
entry.Expiration = time.Now().Add(ttl).UnixNano()
|
||||
} else {
|
||||
entry.Expiration = NoExpiration
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// get retrieves an entry using the key passed as parameter, but unlike Get, it doesn't update the access time or
|
||||
// move the position of the entry to the head
|
||||
func (cache *Cache) get(key string) (*Entry, bool) {
|
||||
entry, ok := cache.entries[key]
|
||||
return entry, ok
|
||||
}
|
||||
|
||||
func (cache *Cache) delete(key string) bool {
|
||||
entry, ok := cache.entries[key]
|
||||
if ok {
|
||||
if cache.maxMemoryUsage != NoMaxMemoryUsage {
|
||||
cache.memoryUsage -= entry.SizeInBytes()
|
||||
}
|
||||
cache.removeExistingEntryReferences(entry)
|
||||
delete(cache.entries, key)
|
||||
}
|
||||
return ok
|
||||
}
|
||||
|
||||
// moveExistingEntryToHead replaces the current cache head for an existing entry
|
||||
func (cache *Cache) moveExistingEntryToHead(entry *Entry) {
|
||||
if !(entry == cache.head && entry == cache.tail) {
|
||||
cache.removeExistingEntryReferences(entry)
|
||||
}
|
||||
if entry != cache.head {
|
||||
entry.previous = cache.head
|
||||
entry.next = nil
|
||||
if cache.head != nil {
|
||||
cache.head.next = entry
|
||||
}
|
||||
cache.head = entry
|
||||
}
|
||||
}
|
||||
|
||||
// removeExistingEntryReferences modifies the next and previous reference of an existing entry and re-links
|
||||
// the next and previous entry accordingly, as well as the cache head or/and the cache tail if necessary.
|
||||
// Note that it does not remove the entry from the cache, only the references.
|
||||
func (cache *Cache) removeExistingEntryReferences(entry *Entry) {
|
||||
if cache.tail == entry && cache.head == entry {
|
||||
cache.tail = nil
|
||||
cache.head = nil
|
||||
} else if cache.tail == entry {
|
||||
cache.tail = cache.tail.next
|
||||
} else if cache.head == entry {
|
||||
cache.head = cache.head.previous
|
||||
}
|
||||
if entry.previous != nil {
|
||||
entry.previous.next = entry.next
|
||||
}
|
||||
if entry.next != nil {
|
||||
entry.next.previous = entry.previous
|
||||
}
|
||||
entry.next = nil
|
||||
entry.previous = nil
|
||||
}
|
||||
|
||||
// evict removes the tail from the cache
|
||||
func (cache *Cache) evict() {
|
||||
if cache.tail == nil || len(cache.entries) == 0 {
|
||||
return
|
||||
}
|
||||
if cache.tail != nil {
|
||||
oldTail := cache.tail
|
||||
cache.removeExistingEntryReferences(oldTail)
|
||||
delete(cache.entries, oldTail.Key)
|
||||
if cache.maxMemoryUsage != NoMaxMemoryUsage {
|
||||
cache.memoryUsage -= oldTail.SizeInBytes()
|
||||
}
|
||||
cache.stats.EvictedKeys++
|
||||
}
|
||||
}
|
vendor/github.com/TwinProduction/gocache/janitor.go (generated, vendored, normal file, 134 lines)
@@ -0,0 +1,134 @@
|
||||
package gocache
|
||||
|
||||
import (
|
||||
"log"
|
||||
"runtime"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// JanitorShiftTarget is the target number of expired keys to find during passive clean up duty
|
||||
// before pausing the passive expired keys eviction process
|
||||
JanitorShiftTarget = 25
|
||||
|
||||
// JanitorMaxIterationsPerShift is the maximum number of nodes to traverse before pausing
|
||||
JanitorMaxIterationsPerShift = 1000
|
||||
|
||||
// JanitorMinShiftBackOff is the minimum interval between each iteration of steps
|
||||
// defined by JanitorMaxIterationsPerShift
|
||||
JanitorMinShiftBackOff = time.Millisecond * 50
|
||||
|
||||
// JanitorMaxShiftBackOff is the maximum interval between each iteration of steps
|
||||
// defined by JanitorMaxIterationsPerShift
|
||||
JanitorMaxShiftBackOff = time.Millisecond * 500
|
||||
)
|
||||
|
||||
// StartJanitor starts the janitor on a different goroutine
|
||||
// The janitor's job is to delete expired keys in the background.
|
||||
// It can be stopped by calling Cache.StopJanitor.
|
||||
// If you do not start the janitor, expired keys will only be deleted when they are accessed through Get
|
||||
func (cache *Cache) StartJanitor() error {
|
||||
if cache.stopJanitor != nil {
|
||||
return ErrJanitorAlreadyRunning
|
||||
}
|
||||
cache.stopJanitor = make(chan bool)
|
||||
go func() {
|
||||
// rather than starting from the tail on every run, we can try to start from the last next entry
|
||||
var lastTraversedNode *Entry
|
||||
totalNumberOfExpiredKeysInPreviousRunFromTailToHead := 0
|
||||
backOff := JanitorMinShiftBackOff
|
||||
for {
|
||||
select {
|
||||
case <-time.After(backOff):
|
||||
// Passive clean up duty
|
||||
cache.mutex.Lock()
|
||||
if cache.tail != nil {
|
||||
start := time.Now()
|
||||
steps := 0
|
||||
expiredEntriesFound := 0
|
||||
current := cache.tail
|
||||
if lastTraversedNode != nil {
|
||||
// Make sure the lastTraversedNode is still in the cache, otherwise we might be traversing nodes that were already deleted.
|
||||
// Furthermore, we need to make sure that the entry from the cache has the same pointer as the lastTraversedNode
|
||||
// to verify that there isn't just a new cache entry with the same key (i.e. in case lastTraversedNode got evicted)
|
||||
if entryFromCache, isInCache := cache.get(lastTraversedNode.Key); isInCache && entryFromCache == lastTraversedNode {
|
||||
current = lastTraversedNode
|
||||
}
|
||||
}
|
||||
if current == cache.tail {
|
||||
if Debug {
|
||||
log.Printf("There are currently %d entries in the cache. The last walk resulted in finding %d expired keys", len(cache.entries), totalNumberOfExpiredKeysInPreviousRunFromTailToHead)
|
||||
}
|
||||
totalNumberOfExpiredKeysInPreviousRunFromTailToHead = 0
|
||||
}
|
||||
for current != nil {
|
||||
var next *Entry
|
||||
steps++
|
||||
if current.Expired() {
|
||||
expiredEntriesFound++
|
||||
// Because delete will remove the next reference from the entry, we need to store the
|
||||
// next reference before we delete it
|
||||
next = current.next
|
||||
cache.delete(current.Key)
|
||||
cache.stats.ExpiredKeys++
|
||||
}
|
||||
if current == cache.head {
|
||||
lastTraversedNode = nil
|
||||
break
|
||||
}
|
||||
// Travel to the current node's next node only if no specific next node has been specified
|
||||
if next != nil {
|
||||
current = next
|
||||
} else {
|
||||
current = current.next
|
||||
}
|
||||
lastTraversedNode = current
|
||||
if steps == JanitorMaxIterationsPerShift || expiredEntriesFound >= JanitorShiftTarget {
|
||||
if expiredEntriesFound > 0 {
|
||||
backOff = JanitorMinShiftBackOff
|
||||
} else {
|
||||
if backOff*2 <= JanitorMaxShiftBackOff {
|
||||
backOff *= 2
|
||||
} else {
|
||||
backOff = JanitorMaxShiftBackOff
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
if Debug {
|
||||
log.Printf("traversed %d nodes and found %d expired entries in %s before stopping\n", steps, expiredEntriesFound, time.Since(start))
|
||||
}
|
||||
totalNumberOfExpiredKeysInPreviousRunFromTailToHead += expiredEntriesFound
|
||||
} else {
|
||||
if backOff*2 < JanitorMaxShiftBackOff {
|
||||
backOff *= 2
|
||||
} else {
|
||||
backOff = JanitorMaxShiftBackOff
|
||||
}
|
||||
}
|
||||
cache.mutex.Unlock()
|
||||
case <-cache.stopJanitor:
|
||||
cache.stopJanitor = nil
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
if Debug {
|
||||
go func() {
|
||||
var m runtime.MemStats
|
||||
for {
|
||||
runtime.ReadMemStats(&m)
|
||||
log.Printf("Alloc=%vMB; HeapReleased=%vMB; Sys=%vMB; HeapInUse=%vMB; HeapObjects=%v; HeapObjectsFreed=%v; GC=%v; cache.memoryUsage=%vMB; cacheSize=%d\n", m.Alloc/1024/1024, m.HeapReleased/1024/1024, m.Sys/1024/1024, m.HeapInuse/1024/1024, m.HeapObjects, m.Frees, m.NumGC, cache.memoryUsage/1024/1024, cache.Count())
|
||||
time.Sleep(3 * time.Second)
|
||||
}
|
||||
}()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// StopJanitor stops the janitor
|
||||
func (cache *Cache) StopJanitor() {
|
||||
cache.stopJanitor <- true
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
vendor/github.com/TwinProduction/gocache/pattern.go (generated, vendored, normal file, 12 lines)
@@ -0,0 +1,12 @@
package gocache

import "path/filepath"

// MatchPattern checks whether a string matches a pattern
func MatchPattern(pattern, s string) bool {
	if pattern == "*" {
		return true
	}
	matched, _ := filepath.Match(pattern, s)
	return matched
}
vendor/github.com/TwinProduction/gocache/persistence.go (generated, vendored, normal file, 147 lines)
@@ -0,0 +1,147 @@
|
||||
package gocache
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/gob"
|
||||
"github.com/boltdb/bolt"
|
||||
"log"
|
||||
"os"
|
||||
"sort"
|
||||
"time"
|
||||
)
|
||||
|
||||
// SaveToFile stores the content of the cache to a file so that it can be read using
|
||||
// the ReadFromFile function
|
||||
func (cache *Cache) SaveToFile(path string) error {
|
||||
db, err := bolt.Open(path, os.ModePerm, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
start := time.Now()
|
||||
cache.mutex.RLock()
|
||||
bulkEntries := make([]*Entry, len(cache.entries))
|
||||
i := 0
|
||||
for _, v := range cache.entries {
|
||||
bulkEntries[i] = v
|
||||
i++
|
||||
}
|
||||
cache.mutex.RUnlock()
|
||||
if Debug {
|
||||
log.Printf("unlocked after %s", time.Since(start))
|
||||
}
|
||||
err = db.Update(func(tx *bolt.Tx) error {
|
||||
_ = tx.DeleteBucket([]byte("entries"))
|
||||
bucket, err := tx.CreateBucket([]byte("entries"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, bulkEntry := range bulkEntries {
|
||||
buffer := bytes.Buffer{}
|
||||
err = gob.NewEncoder(&buffer).Encode(bulkEntry)
|
||||
if err != nil {
|
||||
// Failed to encode the value, so we'll skip it.
|
||||
// This is likely due to the fact that the custom struct wasn't registered using gob.Register(...)
|
||||
// See [Persistence - Limitations](https://github.com/TwinProduction/gocache#limitations)
|
||||
continue
|
||||
}
|
||||
bucket.Put([]byte(bulkEntry.Key), buffer.Bytes())
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return db.Close()
|
||||
}
|
||||
|
||||
// ReadFromFile populates the cache using a file created using cache.SaveToFile(path)
|
||||
//
|
||||
// Note that if the number of entries retrieved from the file exceed the configured maxSize,
|
||||
// the extra entries will be automatically evicted according to the EvictionPolicy configured.
|
||||
// This function returns the number of entries evicted, and because this function only reads
|
||||
// from a file and does not modify it, you can safely retry this function after configuring
|
||||
// the cache with the appropriate maxSize, should you desire to.
|
||||
func (cache *Cache) ReadFromFile(path string) (int, error) {
|
||||
db, err := bolt.Open(path, os.ModePerm, nil)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer db.Close()
|
||||
cache.mutex.Lock()
|
||||
defer cache.mutex.Unlock()
|
||||
err = db.View(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket([]byte("entries"))
|
||||
// If the bucket doesn't exist, there's nothing to read, so we'll return right now
|
||||
if bucket == nil {
|
||||
return nil
|
||||
}
|
||||
err = bucket.ForEach(func(k, v []byte) error {
|
||||
buffer := new(bytes.Buffer)
|
||||
decoder := gob.NewDecoder(buffer)
|
||||
entry := Entry{}
|
||||
buffer.Write(v)
|
||||
err := decoder.Decode(&entry)
|
||||
if err != nil {
|
||||
// Failed to decode the value, so we'll skip it.
|
||||
// This is likely due to the fact that the custom struct wasn't registered using gob.Register(...)
|
||||
// See [Persistence - Limitations](https://github.com/TwinProduction/gocache#limitations)
|
||||
return err
|
||||
}
|
||||
cache.entries[string(k)] = &entry
|
||||
buffer.Reset()
|
||||
return nil
|
||||
})
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
// Because pointers don't get stored in the file, we need to relink everything from head to tail
|
||||
var entries []*Entry
|
||||
for _, v := range cache.entries {
|
||||
entries = append(entries, v)
|
||||
}
|
||||
// Sort the slice of entries from oldest to newest
|
||||
sort.Slice(entries, func(i, j int) bool {
|
||||
return entries[i].RelevantTimestamp.Before(entries[j].RelevantTimestamp)
|
||||
})
|
||||
// Relink the nodes from tail to head
|
||||
var previous *Entry
|
||||
for i := range entries {
|
||||
current := entries[i]
|
||||
if previous == nil {
|
||||
cache.tail = current
|
||||
cache.head = current
|
||||
} else {
|
||||
previous.next = current
|
||||
current.previous = previous
|
||||
cache.head = current
|
||||
}
|
||||
previous = entries[i]
|
||||
if cache.maxMemoryUsage != NoMaxMemoryUsage {
|
||||
cache.memoryUsage += current.SizeInBytes()
|
||||
}
|
||||
}
|
||||
// If the cache doesn't have a maxSize/maxMemoryUsage, then there's no point checking if we need to evict
|
||||
// an entry, so we'll just return now
|
||||
if cache.maxSize == NoMaxSize && cache.maxMemoryUsage == NoMaxMemoryUsage {
|
||||
return 0, nil
|
||||
}
|
||||
// Evict what needs to be evicted
|
||||
numberOfEvictions := 0
|
||||
// If there's a maxSize and the cache has more entries than the maxSize, evict
|
||||
if cache.maxSize != NoMaxSize && len(cache.entries) > cache.maxSize {
|
||||
for len(cache.entries) > cache.maxSize {
|
||||
numberOfEvictions++
|
||||
cache.evict()
|
||||
}
|
||||
}
|
||||
// If there's a maxMemoryUsage and the memoryUsage is above the maxMemoryUsage, evict
|
||||
if cache.maxMemoryUsage != NoMaxMemoryUsage && cache.memoryUsage > cache.maxMemoryUsage {
|
||||
for cache.memoryUsage > cache.maxMemoryUsage && len(cache.entries) > 0 {
|
||||
numberOfEvictions++
|
||||
cache.evict()
|
||||
}
|
||||
}
|
||||
return numberOfEvictions, nil
|
||||
}
|
vendor/github.com/TwinProduction/gocache/policy.go (generated, vendored, normal file, 8 lines)
@@ -0,0 +1,8 @@
package gocache

type EvictionPolicy string

var (
	LeastRecentlyUsed EvictionPolicy = "LeastRecentlyUsed"
	FirstInFirstOut   EvictionPolicy = "FirstInFirstOut"
)
vendor/github.com/TwinProduction/gocache/statistics.go (generated, vendored, normal file, 15 lines)
@@ -0,0 +1,15 @@
package gocache

type Statistics struct {
	// EvictedKeys is the number of keys that were evicted
	EvictedKeys uint64

	// ExpiredKeys is the number of keys that were automatically deleted as a result of expiring
	ExpiredKeys uint64

	// Hits is the number of cache hits
	Hits uint64

	// Misses is the number of cache misses
	Misses uint64
}