-
Notifications
You must be signed in to change notification settings - Fork 0
/
cache.go
131 lines (111 loc) · 2.9 KB
/
cache.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
package radish
import (
"sync"
"sync/atomic"
"time"
)
// Warning: Never do that in production,
// always use proven open source libraries for complex concurrent data structures and algorithms
// (for example https://github.com/patrickmn/go-cache or https://github.com/karlseguin/ccache),
// this is here exclusively for fun and educational purposes.
// simple concurrent epoch based cache with only ttl expiration
const buckets = 3 // minimum 3, so that the current, last and next epochs map to distinct buckets
// cache is a simple concurrent epoch-based key/value store with ttl-only
// expiration. Reads consult the current and the previous epoch's buckets;
// rotating epochs (nextEpoch) carries live entries forward and drops the rest.
type cache struct {
	// bucket holds one sync.Map per epoch; the active one is selected by
	// the epoch counter.
	bucket [buckets]sync.Map
	// epoch is the index of the current bucket; always accessed atomically.
	epoch int32
}
// wrapped is a single cache entry: the stored value plus the metadata
// needed to decide whether it is still alive.
type wrapped struct {
	// value is the user-supplied payload.
	value interface{}
	// created is the time the entry was stored; expiration is measured from it.
	created time.Time
	// ttl is the time-to-live; NoExpiration disables ttl-based expiry.
	ttl time.Duration
	// removed marks a tombstone written by remove(); tombstones always read
	// as expired.
	removed bool
}
// unwrap returns the stored value together with a liveness flag; expired or
// removed entries yield (nil, false).
func (w wrapped) unwrap() (interface{}, bool) {
	if alive := !w.expired(); alive {
		return w.value, true
	}
	return nil, false
}
// expired reports whether the entry can no longer be served: either it is a
// removal tombstone, or its ttl (when not NoExpiration) has elapsed since
// creation.
func (w wrapped) expired() bool {
	switch {
	case w.removed:
		return true
	case w.ttl == NoExpiration:
		return false
	default:
		return time.Since(w.created) >= w.ttl
	}
}
// newcache constructs an empty cache; the zero value is ready to use.
func newcache() *cache {
	var c cache
	return &c
}
// get returns a live value from the cache, consulting first the current
// epoch's bucket and then the previous one; the second result reports
// whether a live value was found.
func (c *cache) get(key string) (interface{}, bool) {
	epoch := atomic.LoadInt32(&c.epoch)
	if v, ok := c.bucket[epoch].Load(key); ok {
		return v.(wrapped).unwrap()
	}
	if v, ok := c.bucket[last(epoch)].Load(key); ok {
		return v.(wrapped).unwrap()
	}
	return nil, false
}
// setw is the internal write path: it stores an already-wrapped value
// (carrying its own creation time) into the current epoch's bucket.
func (c *cache) setw(key string, value wrapped) {
	epoch := atomic.LoadInt32(&c.epoch)
	c.bucket[epoch].Store(key, value)
}
// set puts a value into the cache with the given ttl, stamped with the
// current time.
func (c *cache) set(key string, value interface{}, ttl time.Duration) {
	entry := wrapped{value: value, created: time.Now(), ttl: ttl}
	c.setw(key, entry)
}
// remove writes a tombstone under the key so that subsequent gets miss,
// even if an older live entry still sits in the previous epoch's bucket.
func (c *cache) remove(key string) {
	tomb := wrapped{removed: true}
	c.setw(key, tomb)
}
// keys returns the collection of live keys in the cache: every key present
// in the last or current epoch's bucket whose latest entry is not expired
// and not a tombstone. The result is always non-nil; order is unspecified
// (map iteration order).
func (c *cache) keys() []string {
	result := map[string]bool{}
	cur := atomic.LoadInt32(&c.epoch)
	// collect keys with expiration status from last epoch firstly
	c.bucket[last(cur)].Range(func(key, value interface{}) bool {
		result[key.(string)] = value.(wrapped).expired()
		return true
	})
	// then collect them from current epoch, so the current epoch's entry
	// (including tombstones) overrides the last epoch's on conflict
	c.bucket[cur].Range(func(key, value interface{}) bool {
		result[key.(string)] = value.(wrapped).expired()
		return true
	})
	// take only live ones; pre-size to the upper bound to avoid repeated
	// append growth (was `[]string{}`)
	list := make([]string, 0, len(result))
	for key, expired := range result {
		if !expired {
			list = append(list, key)
		}
	}
	return list
}
// nextEpoch rotates the cache to the next epoch bucket:
//  1. carry still-live entries from the last epoch into the current one
//     (LoadOrStore, so fresher values already in the current epoch win),
//  2. empty the upcoming epoch's bucket,
//  3. publish the new epoch index.
//
// We expect that calls to nextEpoch occur rarely, so no concurrent writes
// to the last epoch exist at this moment.
func (c *cache) nextEpoch() {
	// steps order important!
	cur := atomic.LoadInt32(&c.epoch)
	// copy live values from last epoch
	c.bucket[last(cur)].Range(func(key, value interface{}) bool {
		if !value.(wrapped).expired() {
			c.bucket[cur].LoadOrStore(key, value)
		}
		return true
	})
	// Cleanup the next epoch's bucket in place. The previous code assigned
	// a fresh sync.Map{} over the old one, which copies a value containing
	// a sync.Mutex (flagged by `go vet` copylocks) and races with any
	// concurrent reader of that bucket; Range+Delete is safe for concurrent
	// use.
	nxt := next(cur)
	c.bucket[nxt].Range(func(key, _ interface{}) bool {
		c.bucket[nxt].Delete(key)
		return true
	})
	atomic.StoreInt32(&c.epoch, nxt)
}
// next returns the epoch index that follows the given one, wrapping around
// the fixed number of buckets.
func next(epoch int32) int32 {
	following := epoch + 1
	return following % buckets
}
// last returns the epoch index that precedes the given one, wrapping around
// the fixed number of buckets (the +buckets keeps the dividend non-negative).
func last(epoch int32) int32 {
	previous := epoch - 1 + buckets
	return previous % buckets
}