#!/usr/bin/env node

// [start-readme]
//
// Run this script to manually purge the Redis rendered page cache.
// This will typically only be run by Heroku during the deployment process,
// as triggered via our Procfile's "release" phase configuration.
//
// [end-readme]
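//
// For reference, a Procfile "release" entry generally takes this shape
// (illustrative only; the actual command in the Procfile may differ):
//
//   release: node <path/to/this/script>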

const program = require('commander')
const Redis = require('ioredis')

const { REDIS_URL, HEROKU_RELEASE_VERSION, HEROKU_PRODUCTION_APP } = process.env
const isHerokuProd = HEROKU_PRODUCTION_APP === 'true'
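// Rendered pages are cached in their own logical Redis database. When a
// Heroku release version is present, cache keys are namespaced by that
// version (see the `isOldKey` check below), so scan for any version's
// ':rp:' keys; otherwise keys begin with 'rp:' directly.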
const pageCacheDatabaseNumber = 1
const keyScanningPattern = HEROKU_RELEASE_VERSION ? '*:rp:*' : 'rp:*'
const scanSetSize = 250

const startTime = Date.now()
const expirationDuration = 30 * 60 * 1000 // 30 minutes
const expirationTimestamp = startTime + expirationDuration // 30 minutes from now

program
  .description('Purge the Redis rendered page cache')
  .option('-d, --dry-run', 'print keys to be purged without actually purging')
  .parse(process.argv)

const dryRun = program.dryRun

// verify environment variables
if (!REDIS_URL) {
  if (isHerokuProd) {
    console.error('Error: you must specify the REDIS_URL environment variable.\n')
    process.exit(1)
  } else {
    console.warn('Warning: you did not specify a REDIS_URL environment variable. Exiting...\n')
    process.exit(0)
  }
}

console.log({
  HEROKU_RELEASE_VERSION,
  HEROKU_PRODUCTION_APP
})

purgeRenderedPageCache()

function purgeRenderedPageCache () {
  const redisClient = new Redis(REDIS_URL, { db: pageCacheDatabaseNumber })
  let totalKeyCount = 0
  let iteration = 0

  // Create a readable stream (object mode) for the SCAN cursor
  const scanStream = redisClient.scanStream({
    match: keyScanningPattern,
    count: scanSetSize
  })
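
  // Each 'data' event delivers one batch of matching keys. The COUNT option
  // is only a hint for how much work each SCAN iteration does on the server,
  // not a guaranteed batch size.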

  scanStream.on('end', function () {
    console.log(`Done purging keys; affected total: ${totalKeyCount}`)
    console.log(`Time elapsed: ${Date.now() - startTime} ms`)

    // Exit explicitly: the open Redis connection would otherwise keep the
    // Node.js process alive after the scan finishes
    process.exit(0)
  })

  scanStream.on('error', function (error) {
    console.error('An unexpected error occurred!\n' + error.stack)
    console.error('\nAborting...')
    process.exit(1)
  })

  scanStream.on('data', async function (keys) {
    console.log(`[Iteration ${iteration++}] Received ${keys.length} keys...`)

    // NOTE: It is possible for a SCAN cursor iteration to return 0 keys when
    // using a MATCH because the pattern is applied after the elements are
    // retrieved from the keyspace
    if (keys.length === 0) return

    if (dryRun) {
      console.log(`DRY RUN! This iteration might have set TTL for up to ${keys.length} keys:\n - ${keys.join('\n - ')}`)
      return
    }

    // Pause the SCAN stream while we set a TTL on these keys
    scanStream.pause()

    // Find the existing TTLs to ensure we don't extend a TTL that is already
    // set. PTTL returns -1 for keys with no expiry; we only shorten keys with
    // no TTL or a TTL longer than our expiration duration (30 minutes)
    const pttlPipeline = redisClient.pipeline()
    keys.forEach(key => pttlPipeline.pttl(key))
    const pttlResults = await pttlPipeline.exec()
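    // Each entry in pttlResults is an [error, result] pair (ioredis pipeline
    // semantics), in the same order the PTTL commands were queued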

    // Update pertinent keys to have TTLs set
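    // PEXPIREAT takes an absolute Unix timestamp in milliseconds, so every
    // affected key expires at the same moment (30 minutes after this script
    // started) rather than 30 minutes after its own command runs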
    let updatingKeyCount = 0
    const pexpireAtPipeline = redisClient.pipeline()
    keys.forEach((key, i) => {
      const [error, pttl] = pttlResults[i]
      const needsShortenedTtl = error == null && (pttl === -1 || pttl > expirationDuration)
      const isOldKey = !HEROKU_RELEASE_VERSION || !key.startsWith(`${HEROKU_RELEASE_VERSION}:`)

      if (needsShortenedTtl && isOldKey) {
        pexpireAtPipeline.pexpireat(key, expirationTimestamp)
        updatingKeyCount += 1
      }
    })

    // Only update TTLs if there are records worth updating
    if (updatingKeyCount > 0) {
      // Set all the TTLs
      const pexpireAtResults = await pexpireAtPipeline.exec()
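      // PEXPIREAT replies 1 when the timeout was set and 0 when the key no
      // longer exists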
      const updatedResults = pexpireAtResults.filter(([error, result]) => error == null && result === 1)

      // Count only the entries whose TTLs were successfully updated
      totalKeyCount += updatedResults.length
    }

    // Resume the SCAN stream
    scanStream.resume()
  })
}