Comparing changes

base repository: isaacs/node-lru-cache
base: v7.5.1
head repository: isaacs/node-lru-cache
compare: v7.5.2

  • 3 commits
  • 6 files changed
  • 1 contributor

Commits on Apr 9, 2022

  1. backport publish tag

    isaacs committed Apr 9, 2022 (commit d169d84)
  2. Fix memory leak in unbounded storage case

    Evictions were not being tracked properly when maxSize triggered an
    eviction.

    In the max-bound storage case, this wasn't so much of a problem, because
    the storage couldn't grow unbounded anyway.  However, since size-evicted
    indexes were not being added to the `free` list, the available storage
    area was effectively reduced, holding onto old entries and resulting in
    fewer items being cached (while some stale entries were retained
    indefinitely!)
    
    In the unbounded case, this was much worse.  Because the storage array
    was unbounded, the cache effectively always remained in the initial
    "fill to completion" state, resulting in holding onto _every_ item
    forever, while thinking that the `calculatedSize` was being limited to
    `maxSize`!  Pretty bad.
    
    (This will be back-ported to other 7.x versions as a patch.)
    
    Thanks to @moish83 for the report.
    
    Fix: #227
    isaacs committed Apr 9, 2022 (commit ffd8542)
  3. 7.5.2

    isaacs committed Apr 9, 2022 (commit 5c146c6)
Showing with 945 additions and 1,573 deletions.
  1. +11 −4 index.js
  2. +742 −1,566 package-lock.json
  3. +10 −3 package.json
  4. +1 −0 scripts/.gitignore
  5. +68 −0 scripts/max-size-test.js
  6. +113 −0 test/avoid-memory-leak.js
15 changes: 11 additions & 4 deletions index.js
@@ -115,6 +115,7 @@ class LRUCache {
         throw new TypeError('sizeCalculating set to non-function')
       }
     }
+
     this.keyMap = new Map()
     this.keyList = new Array(max).fill(null)
     this.valList = new Array(max).fill(null)
@@ -224,7 +225,7 @@
       this.sizes[index] = isPosInt(s) ? s : 0
       const maxSize = this.maxSize - this.sizes[index]
       while (this.calculatedSize > maxSize) {
-        this.evict()
+        this.evict(true)
       }
       this.calculatedSize += this.sizes[index]
     }
@@ -437,7 +438,7 @@
       return this.tail
     }
     if (this.size === this.max) {
-      return this.evict()
+      return this.evict(false)
     }
     if (this.free.length !== 0) {
       return this.free.pop()
@@ -449,12 +450,12 @@
   pop () {
     if (this.size) {
       const val = this.valList[this.head]
-      this.evict()
+      this.evict(true)
       return val
     }
   }

-  evict () {
+  evict (free) {
     const head = this.head
     const k = this.keyList[head]
     const v = this.valList[head]
@@ -463,6 +464,12 @@
       this.disposed.push([v, k, 'evict'])
     }
     this.removeItemSize(head)
+    // if we aren't about to use the index, then null these out
+    if (free) {
+      this.keyList[head] = null
+      this.valList[head] = null
+      this.free.push(head)
+    }
     this.head = this.next[head]
     this.keyMap.delete(k)
     this.size --
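
In short, `evict()` now takes a `free` flag: the one call site that is about to reuse the evicted index right away (`newIndex()`) passes `false`, while the eviction paths that will not reuse it (the `maxSize` while-loop and `pop()`) pass `true`, which nulls out the slot and returns its index to the `free` list. The following is a minimal standalone sketch of that recycling idea, not the library's actual code (the `SlotStore` name and shape are invented for illustration):

// Illustrative sketch only (invented SlotStore, not lru-cache internals).
// Without the free-list push in evict(), `vals` grows on every insert and
// keeps references to evicted values alive -- the leak described above.
class SlotStore {
  constructor () {
    this.vals = []
    this.free = [] // indexes available for reuse
  }
  insert (v) {
    const index = this.free.length ? this.free.pop() : this.vals.length
    this.vals[index] = v
    return index
  }
  evict (index, reuse) {
    if (!reuse) {
      this.vals[index] = null // drop the reference so it can be collected
      this.free.push(index)   // recycle the slot instead of growing vals
    }
    return index
  }
}
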
2,308 changes: 742 additions & 1,566 deletions package-lock.json

13 changes: 10 additions & 3 deletions package.json
@@ -1,7 +1,10 @@
 {
   "name": "lru-cache",
   "description": "A cache object that deletes the least-recently-used items.",
-  "version": "7.5.1",
+  "version": "7.5.2",
+  "publishConfig": {
+    "tag": "v7.5-backport"
+  },
   "author": "Isaac Z. Schlueter <i@izs.me>",
   "keywords": [
     "mru",
@@ -22,8 +25,9 @@
   "devDependencies": {
     "@size-limit/preset-small-lib": "^7.0.8",
     "benchmark": "^2.1.4",
+    "heapdump": "^0.3.15",
     "size-limit": "^7.0.8",
-    "tap": "^15.1.6"
+    "tap": "^16.0.1"
   },
   "license": "ISC",
   "files": [
@@ -33,7 +37,10 @@
     "node": ">=12"
   },
   "tap": {
-    "coverage-map": "map.js"
+    "coverage-map": "map.js",
+    "node-arg": [
+      "--expose-gc"
+    ]
   },
   "size-limit": [
     {
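
The `heapdump` devDependency supports the profiling script added below, and the new `node-arg` entry passes `--expose-gc` to the node process tap spawns for each test file, so the leak test below can call the global `gc()` before sampling heap statistics. (Running that test file directly would need the same flag, e.g. `node --expose-gc test/avoid-memory-leak.js`; that invocation is just an illustration, not part of this change.)
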
1 change: 1 addition & 0 deletions scripts/.gitignore
@@ -0,0 +1 @@
heapdump-*
68 changes: 68 additions & 0 deletions scripts/max-size-test.js
@@ -0,0 +1,68 @@
#!/usr/bin/env node --max_old_space_size=400 --expose_gc

// https://github.com/isaacs/node-lru-cache/issues/227

if (typeof gc !== 'function') {
  throw new Error('run with --expose-gc')
}

const heapdump = require('heapdump')

const LRU = require('../')
const maxSize = 1_000_000
const itemSize = 1_000
const expectItemCount = Math.floor(maxSize / itemSize)
const profEvery = 100_000
const n = 10_000_000

const sizeCalculation = s => s.length || 1
const max = expectItemCount + 1
const keyRange = expectItemCount * 2
const makeItem = () => Buffer.alloc(itemSize)

const v8 = require('v8')
const prof = async (i, cache, name) => {
  // run gc so that we know if we're actually leaking memory, or just
  // that the gc is being lazy and not responding until there's memory
  // pressure.
  gc()
  const file = `${__dirname}/heapdump-${name}-${i}.heapsnapshot`
  await new Promise((res, rej) =>
    heapdump.writeSnapshot(file, er => er ? rej(er) : res()))
  if (!cache || i === 0) {
    console.log(i, v8.getHeapStatistics(), file)
  }
  if (cache && i === 0) {
    console.log(max, cache.valList.length, cache.free.length)
  }
}

const test = async (name, cache) => {
  console.log(name)
  for (let i = 0; i < n; i++) {
    if ((i % profEvery) === 0) {
      await prof(i, cache, name)
    }

    // add items within a range of 2x the expected item count,
    // so we get evictions happening
    const item = makeItem()
    cache.set(i % keyRange, item)

    // get some random item, too, to keep the list a bit shuffled.
    // often these will be missing, of course, but expectItemCount/keyRange
    // times they'll be a hit, once the cache is full.
    const j = Math.floor(Math.random() * keyRange)
    cache.get(j)
  }
  cache = null
  prof(n, null, name)
}

const main = async () => {
  await test('max-no-maxSize', new LRU({ max }))
  await test('max-maxSize', new LRU({ max, maxSize, sizeCalculation }))
  await test('no-max-maxSize', new LRU({ maxSize, sizeCalculation }))
}

main()
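
This script is a manual reproduction aid for the leak rather than part of the test suite (its heapdump-* output is ignored via the new scripts/.gitignore entry). Going by the shebang, it is presumably run as something like `node --max_old_space_size=400 --expose_gc scripts/max-size-test.js`; it writes a heap snapshot every 100,000 iterations, and with the fix in place those snapshots should stay roughly flat in size across all three configurations, including the previously-leaking `no-max-maxSize` one.
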
113 changes: 113 additions & 0 deletions test/avoid-memory-leak.js
@@ -0,0 +1,113 @@
#!/usr/bin/env node --expose-gc

// https://github.com/isaacs/node-lru-cache/issues/227

const t = require('tap')

const maxSize = 100_000
const itemSize = 1_000
const profEvery = 10_000
const n = 1_000_000

if (typeof gc !== 'function') {
  t.plan(0, 'run with --expose-gc')
  process.exit(0)
}

const tryReq = mod => {
  try {
    return require(mod)
  } catch (er) {
    t.plan(0, `need ${mod} module`)
    process.exit(0)
  }
}

const v8 = tryReq('v8')

const LRUCache = require('../')
const expectItemCount = Math.ceil(maxSize / itemSize)
const max = expectItemCount + 1
const keyRange = expectItemCount * 2

// fine to alloc unsafe, we don't ever look at the data
const makeItem = () => Buffer.allocUnsafe(itemSize)

const prof = (i, cache) => {
  // run gc so that we know if we're actually leaking memory, or just
  // that the gc is being lazy and not responding until there's memory
  // pressure.
  gc()
  return {
    i,
    ...v8.getHeapStatistics(),
    valListLength: cache.valList.length,
    freeLength: cache.free.length,
  }
}

const runTest = async (t, cache) => {
  // first, fill to expected size
  for (let i = 0; i < expectItemCount; i++) {
    cache.set(i, makeItem())
  }

  // now start the setting and profiling
  const profiles = []
  for (let i = 0; i < n; i++) {
    if ((i % profEvery) === 0) {
      const profile = prof(i, cache)
      t.ok(
        profile.valListLength <= max,
        `expect valList to have fewer than ${max} items`,
        { found: profile.valListLength },
      )
      t.ok(
        profile.freeLength <= 1,
        'expect free stack to have <= 1 item',
        { found: profile.freeLength },
      )
      t.equal(profile.number_of_native_contexts, 1, '1 native context')
      t.equal(profile.number_of_detached_contexts, 0, '0 detached contexts')
      profiles.push(profile)
    }

    const item = makeItem()
    cache.set(i % keyRange, item)
  }

  const profile = prof(n, cache)
  profiles.push(profile)

  // Warning: kludgey inexact test!
  // memory leaks can be hard to catch deterministically.
  // The first few items will tend to be lower, and we'll see
  // *some* modest increase in heap usage from tap itself as it
  // runs the test and builds up its internal results data.
  // But, after the initial few profiles, it should be modest.
  // Considering that the reported bug showed a 10x increase in
  // memory in this reproduction case, 2x is still pretty aggressive,
  // without risking false hits from other node or tap stuff.

  const start = Math.floor(profiles.length / 2)
  const initial = profiles[start]
  for (let i = start; i < profiles.length; i++) {
    const current = profiles[i]
    const delta = current.total_heap_size / initial.total_heap_size
    t.ok(delta < 2, 'memory growth should not be unbounded', {
      delta,
      current,
      initial,
    })
  }
}

t.test('both max and maxSize', t =>
  runTest(t, new LRUCache({
    maxSize,
    sizeCalculation: s => s.length,
    max,
  })))

t.test('only max, no maxSize', t =>
  runTest(t, new LRUCache({ max })))
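
For a quick interactive check of the originally reported shape of the bug (a cache bounded only by `maxSize`, issue #227), something along these lines can be run from the repo root. It peeks at the same internal `valList`/`free` fields the files above inspect, so treat it as a sanity-check sketch against implementation details, not supported API:

// Sanity-check sketch only: valList and free are internal fields.
const LRUCache = require('./')
const cache = new LRUCache({
  maxSize: 100_000,
  sizeCalculation: s => s.length,
})
for (let i = 0; i < 1_000_000; i++) {
  // cycle through 200 keys so evictions happen constantly
  cache.set(i % 200, Buffer.allocUnsafe(1_000))
}
// With the fix, evicted slots are recycled via the free list, so the backing
// arrays stay small and calculatedSize respects maxSize. Before the fix,
// valList kept growing here even though calculatedSize looked capped.
console.log({
  size: cache.size,
  calculatedSize: cache.calculatedSize,
  valListLength: cache.valList.length,
  freeLength: cache.free.length,
})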