ebisu.min.js.map · 7 lines (7 loc) · 33.2 KB
{
"version": 3,
"sources": ["../node_modules/minimize-golden-section-1d/src/golden-section-minimize.js", "../node_modules/minimize-golden-section-1d/src/bracket-minimum.js", "../node_modules/minimize-golden-section-1d/index.js", "../node_modules/gamma/index.js", "../index.ts", "../fmin.ts", "../gamma.ts", "../logsumexp.ts"],
"sourcesContent": ["'use strict';\n\nvar PHI_RATIO = 2 / (1 + Math.sqrt(5));\n\nmodule.exports = goldenSectionMinimize;\n\nfunction goldenSectionMinimize (f, xL, xU, tol, maxIterations, status) {\n var xF, fF;\n var iteration = 0;\n var x1 = xU - PHI_RATIO * (xU - xL);\n var x2 = xL + PHI_RATIO * (xU - xL);\n // Initial bounds:\n var f1 = f(x1);\n var f2 = f(x2);\n\n // Store these values so that we can return these if they're better.\n // This happens when the minimization falls *approaches* but never\n // actually reaches one of the bounds\n var f10 = f(xL);\n var f20 = f(xU);\n var xL0 = xL;\n var xU0 = xU;\n\n // Simple, robust golden section minimization:\n while (++iteration < maxIterations && Math.abs(xU - xL) > tol) {\n if (f2 > f1) {\n xU = x2;\n x2 = x1;\n f2 = f1;\n x1 = xU - PHI_RATIO * (xU - xL);\n f1 = f(x1);\n } else {\n xL = x1;\n x1 = x2;\n f1 = f2;\n x2 = xL + PHI_RATIO * (xU - xL);\n f2 = f(x2);\n }\n }\n\n xF = 0.5 * (xU + xL);\n fF = 0.5 * (f1 + f2);\n\n if (status) {\n status.iterations = iteration;\n status.argmin = xF;\n status.minimum = fF;\n status.converged = true;\n }\n\n if (isNaN(f2) || isNaN(f1) || iteration === maxIterations) {\n if (status) {\n status.converged = false;\n }\n return NaN;\n }\n\n if (f10 < fF) {\n return xL0;\n } else if (f20 < fF) {\n return xU0;\n } else {\n return xF;\n }\n}\n", "'use strict';\n\nmodule.exports = bracketMinimum;\n\nfunction bracketMinimum (bounds, f, x0, dx, xMin, xMax, maxIter) {\n // If either size is unbounded (=infinite), Expand the guess\n // range until we either bracket a minimum or until we reach the bounds:\n var fU, fL, fMin, n, xL, xU, bounded;\n n = 1;\n xL = x0;\n xU = x0;\n fMin = fL = fU = f(x0);\n while (!bounded && isFinite(dx) && !isNaN(dx)) {\n ++n;\n bounded = true;\n\n if (fL <= fMin) {\n fMin = fL;\n xL = Math.max(xMin, xL - dx);\n fL = f(xL);\n bounded = false;\n }\n if (fU <= fMin) {\n fMin = fU;\n xU = Math.min(xMax, xU + dx);\n fU = f(xU);\n bounded = false;\n }\n\n // Track the smallest value seen so far:\n fMin = Math.min(fMin, fL, fU);\n\n // If either of these is the case, then the function appears\n // to be minimized against one of the bounds, so although we\n // haven't bracketed a minimum, we'll considere the procedure\n // complete because we appear to have bracketed a minimum\n // against a bound:\n if ((fL === fMin && xL === xMin) || (fU === fMin && xU === xMax)) {\n bounded = true;\n }\n\n // Increase the increment at a very quickly increasing rate to account\n // for the fact that we have *no* idea what floating point magnitude is\n // desirable. In order to avoid this, you should really provide *any\n // reasonable bounds at all* for the variables.\n dx *= n < 4 ? 2 : Math.exp(n * 0.5);\n\n if (!isFinite(dx)) {\n bounds[0] = -Infinity;\n bounds[1] = Infinity;\n return bounds;\n }\n }\n\n bounds[0] = xL;\n bounds[1] = xU;\n return bounds;\n}\n", "'use strict';\n\nvar goldenSectionMinimize = require('./src/golden-section-minimize');\nvar bracketMinimum = require('./src/bracket-minimum');\n\nvar bounds = [0, 0];\n\nmodule.exports = function minimize (f, options, status) {\n options = options || {};\n var x0;\n var tolerance = options.tolerance === undefined ? 1e-8 : options.tolerance;\n var dx = options.initialIncrement === undefined ? 1 : options.initialIncrement;\n var xMin = options.lowerBound === undefined ? -Infinity : options.lowerBound;\n var xMax = options.upperBound === undefined ? Infinity : options.upperBound;\n var maxIterations = options.maxIterations === undefined ? 
100 : options.maxIterations;\n\n if (status) {\n status.iterations = 0;\n status.argmin = NaN;\n status.minimum = Infinity;\n status.converged = false;\n }\n\n if (isFinite(xMax) && isFinite(xMin)) {\n bounds[0] = xMin;\n bounds[1] = xMax;\n } else {\n // Construct the best guess we can:\n if (options.guess === undefined) {\n if (xMin > -Infinity) {\n x0 = xMax < Infinity ? 0.5 * (xMin + xMax) : xMin;\n } else {\n x0 = xMax < Infinity ? xMax : 0;\n }\n } else {\n x0 = options.guess;\n }\n\n bracketMinimum(bounds, f, x0, dx, xMin, xMax, maxIterations);\n\n if (isNaN(bounds[0]) || isNaN(bounds[1])) {\n return NaN;\n }\n }\n\n return goldenSectionMinimize(f, bounds[0], bounds[1], tolerance, maxIterations, status);\n};\n", "// transliterated from the python snippet here:\n// http://en.wikipedia.org/wiki/Lanczos_approximation\n\nvar g = 7;\nvar p = [\n 0.99999999999980993,\n 676.5203681218851,\n -1259.1392167224028,\n 771.32342877765313,\n -176.61502916214059,\n 12.507343278686905,\n -0.13857109526572012,\n 9.9843695780195716e-6,\n 1.5056327351493116e-7\n];\n\nvar g_ln = 607/128;\nvar p_ln = [\n 0.99999999999999709182,\n 57.156235665862923517,\n -59.597960355475491248,\n 14.136097974741747174,\n -0.49191381609762019978,\n 0.33994649984811888699e-4,\n 0.46523628927048575665e-4,\n -0.98374475304879564677e-4,\n 0.15808870322491248884e-3,\n -0.21026444172410488319e-3,\n 0.21743961811521264320e-3,\n -0.16431810653676389022e-3,\n 0.84418223983852743293e-4,\n -0.26190838401581408670e-4,\n 0.36899182659531622704e-5\n];\n\n// Spouge approximation (suitable for large arguments)\nfunction lngamma(z) {\n\n if(z < 0) return Number('0/0');\n var x = p_ln[0];\n for(var i = p_ln.length - 1; i > 0; --i) x += p_ln[i] / (z + i);\n var t = z + g_ln + 0.5;\n return .5*Math.log(2*Math.PI)+(z+.5)*Math.log(t)-t+Math.log(x)-Math.log(z);\n}\n\nmodule.exports = function gamma (z) {\n if (z < 0.5) {\n return Math.PI / (Math.sin(Math.PI * z) * gamma(1 - z));\n }\n else if(z > 100) return Math.exp(lngamma(z));\n else {\n z -= 1;\n var x = p[0];\n for (var i = 1; i < g + 2; i++) {\n x += p[i] / (z + i);\n }\n var t = z + g + 0.5;\n\n return Math.sqrt(2 * Math.PI)\n * Math.pow(t, z + 0.5)\n * Math.exp(-t)\n * x\n ;\n }\n};\n\nmodule.exports.log = lngamma;\n", "import {fmin} from \"./fmin\";\nimport {gamma, gammaln} from \"./gamma\";\nimport {logsumexp} from \"./logsumexp\";\n\nimport {MinimizeParams, type Model} from \"./interfaces\";\n\nconst GAMMALN_CACHE = new Map();\nfunction gammalnCached(x: number) {\n let hit = GAMMALN_CACHE.get(x);\n if (hit !== undefined) { return hit; }\n hit = gammaln(x);\n GAMMALN_CACHE.set(x, hit);\n return hit;\n}\nfunction betalnRatio(a1: number, a: number, b: number) {\n return (gammaln(a1) - gammaln(a1 + b) + gammalnCached(a + b) - gammalnCached(a));\n}\nlet betaln = (a: number, b: number) => { return gammalnCached(a) + gammalnCached(b) - gammalnCached(a + b); };\nlet betalnUncached = (a: number, b: number) => { return gammaln(a) + gammaln(b) - gammaln(a + b); };\nlet betafn = (a: number, b: number) => { return (gamma(a) * gamma(b)) / gamma(a + b); };\nfunction binomln(n: number, k: number) { return -betaln(1 + n - k, 1 + k) - Math.log(n + 1); }\nexport function customizeMath(args: Record<string, any>) {\n const orig = {betaln, betafn};\n if (args.betaln) { betaln = args.betaln; }\n if (args.betafn) { betafn = args.betafn; }\n return orig;\n}\n\nfunction _meanVarToBeta(mean: number, v: number) {\n var tmp = (mean * (1 - mean)) / v - 1;\n var alpha = mean * tmp;\n var beta = (1 - mean) * 
tmp;\n return [alpha, beta];\n}\n\n/**\n Expected recall log-probability now, given a prior distribution on it.\n\n `prior` is a tuple representing the prior distribution on recall probability\n after a specific unit of time has elapsed since this fact's last review.\n Specifically, it's a 3-tuple, `(alpha, beta, t)` where `alpha` and `beta`\n parameterize a Beta distribution that is the prior on recall probability at\n time `t`.\n\n `tnow` is the *actual* time elapsed since this fact's most recent review. It\n is in units consistent with `t` in your prior model.\n\n Optional parameter `exact` makes the return value a probability, specifically,\n the expected recall probability `tnow` after the last review: a number between\n 0 and 1. If `exact` is falsey, we return the log-probability; pass truthy for\n true linear probability (between 0 and 1).\n */\nexport function predictRecall(prior: Model, tnow: number, exact = false): number {\n const [alpha, beta, t] = prior;\n const dt = tnow / t;\n const ret = betalnRatio(alpha + dt, alpha, beta);\n return exact ? Math.exp(ret) : ret;\n}\n\n/**\n Update a prior on recall probability with a quiz result and time.\n\n `prior` is same as in `ebisu.predictRecall`'s arguments: an array\n representing a prior distribution on recall probability at some specific time\n after a fact's most recent review.\n\n `successes` is the number of times the user *successfully* exercised this\n memory during this review session, out of `n` attempts. Therefore, `0 <=\n successes <= total` and `1 <= total`.\n\n If the user was shown this flashcard only once during this review session,\n then `total=1`. If the quiz was a success, then `successes=1`, else\n `successes=0`. (See below for fuzzy quizzes.)\n\n If the user was shown this flashcard *multiple* times during the review\n session (e.g., Duolingo-style), then `total` can be greater than 1.\n\n If `total` is 1, `successes` can be a float between 0 and 1 inclusive. This\n implies that while there was some \"real\" quiz result, we only observed a\n scrambled version of it, which is `successes > 0.5`. A \"real\" successful quiz\n has a `max(successes, 1 - successes)` chance of being scrambled such that we\n observe a failed quiz `successes > 0.5`. E.g., `successes` of 0.9 *and* 0.1\n imply there was a 10% chance a \"real\" successful quiz could result in a failed\n quiz.\n\n This noisy quiz model also allows you to specify the related probability that\n a \"real\" quiz failure could be scrambled into the successful quiz you observed.\n Consider \"Oh no, if you'd asked me that yesterday, I would have forgotten it.\"\n By default, this probability is `1 - max(successes, 1 - successes)` but doesn't\n need to be that value. Provide `q0` to set this explicitly. See the full Ebisu\n mathematical analysis for details on this model and why this is called \"q0\".\n\n `tnow` is the time elapsed between this fact's last review in units consistent\n with `prior`.\n\n Returns a new array (like `prior`) describing the posterior distribution of\n recall probability at `tback` time after review.\n\n If `rebalance` is True, the new array represents the updated recall\n probability at *the halflife*, i,e., `tback` such that the expected\n recall probability is is 0.5. This is the default behavior.\n\n Performance-sensitive users might consider disabling rebalancing. In that\n case, they may pass in the `tback` that the returned model should correspond\n to. 
If none is provided, the returned model represets recall at the same time\n as the input model.\n\n N.B. This function is tested for numerical stability for small `total < 5`. It\n may be unstable for much larger `total`.\n\n N.B.2. This function may throw an assertion error upon numerical instability.\n This can happen if the algorithm is *extremely* surprised by a result; for\n example, if `successes=0` and `total=5` (complete failure) when `tnow` is very\n small compared to the halflife encoded in `prior`. Calling functions are asked\n to call this inside a try-except block and to handle any possible\n `AssertionError`s in a manner consistent with user expectations, for example,\n by faking a more reasonable `tnow`. Please open an issue if you encounter such\n exceptions for cases that you think are reasonable.\n */\nexport function updateRecall(\n prior: Model,\n successes: number,\n total: number,\n tnow: number,\n q0?: number,\n rebalance = true,\n tback?: number,\n {useLog = false, tolerance = 1e-8}: Partial<MinimizeParams> = {},\n ): Model {\n if (0 > successes || successes > total || total < 1) {\n throw new Error(\"0 <= successes and successes <= total and 1 <= total must be true\");\n }\n\n if (total === 1) { return _updateRecallSingle(prior, successes, tnow, q0, rebalance, tback, {useLog, tolerance}); }\n\n if (!(successes === Math.trunc(successes) && total === Math.trunc(total))) {\n throw new Error('expecting integer successes and total')\n }\n\n const [alpha, beta, t] = prior;\n const dt = tnow / t;\n const failures = total - successes;\n const binomlns: number[] = [];\n for (let i = 0; i <= failures; i++) { binomlns.push(binomln(failures, i)); }\n\n function unnormalizedLogMoment(m: number, et: number) {\n const logProbs = [];\n for (let i = 0; i <= failures; i++) {\n logProbs.push(binomlns[i] + betaln(alpha + dt * (successes + i) + m * dt * et, beta));\n }\n const signs = [];\n for (let i = 0; i <= failures; i++) { signs.push(Math.pow(-1, i)); }\n return logsumexp(logProbs, signs)[0];\n }\n\n const logDenominator = unnormalizedLogMoment(0, 0);\n\n let et: number;\n if (rebalance) {\n const target = Math.log(0.5);\n const rootfn = (et: number) => unnormalizedLogMoment(1, et) - logDenominator - target;\n const status = {};\n const sol = fmin((x) => Math.abs(rootfn(x)), {tolerance}, status);\n if (!(\"converged\" in status) || !status.converged) {\n console.log(status);\n throw new Error(\"failed to converge: binomial\");\n }\n\n et = sol;\n tback = et * tnow;\n }\n if (tback) {\n et = tback / tnow;\n } else {\n tback = t;\n et = tback / tnow;\n }\n\n const logMean = unnormalizedLogMoment(1, et) - logDenominator;\n const mean = Math.exp(logMean);\n const m2 = Math.exp(unnormalizedLogMoment(2, et) - logDenominator);\n\n if (mean <= 0) { throw new Error(\"negative mean encountered\"); }\n if (m2 <= 0) { throw new Error(\"negative 2nd moment encountered\"); }\n\n const meanSq = Math.exp(2 * logMean);\n const variance = m2 - meanSq;\n if (variance <= 0) { throw new Error(\"negative variance encountered\"); }\n const [newAlpha, newBeta] = _meanVarToBeta(mean, variance);\n return [newAlpha, newBeta, tback];\n}\n\nfunction _updateRecallSingle(\n prior: Model,\n result: number,\n tnow: number,\n q0?: number,\n rebalance = true,\n tback?: number,\n {useLog = false, tolerance = 1e-8}: Partial<MinimizeParams> = {},\n ): Model {\n if (!(0 <= result && result <= 1)) { throw new Error('expecting result between 0 and 1 inclusive') }\n const [alpha, beta, t] = prior;\n\n const z = result > 
0.5;\n const q1 = z ? result : 1 - result;\n if (q0 === undefined) { q0 = 1 - q1; }\n\n const dt = tnow / t;\n\n let [c, d] = z ? [q1 - q0, q0] : [q0 - q1, 1 - q0];\n\n const den = c * betafn(alpha + dt, beta) + d * (betafn(alpha, beta) || 0);\n const logden =\n useLog ? logsumexp([betalnUncached(alpha + dt, beta), (betalnUncached(alpha, beta) || -Infinity)], [c, d])[0] : 0;\n\n function moment(N: number, et: number) {\n let num = c * betafn(alpha + dt + N * dt * et, beta);\n if (d !== 0) { num += d * betafn(alpha + N * dt * et, beta); }\n return num / den;\n }\n function logmoment(N: number, et: number) {\n if (d !== 0) {\n const res =\n logsumexp([betalnUncached(alpha + dt + N * dt * et, beta), betalnUncached(alpha + N * dt * et, beta)], [c, d])\n return res[0] - logden\n }\n return Math.log(c) + betalnUncached(alpha + dt + N * dt * et, beta) - logden\n }\n\n let et: number;\n if (rebalance) {\n const status = {};\n let sol: number;\n if (useLog) {\n const target = Math.log(0.5)\n sol = fmin((et) => Math.abs(logmoment(1, et) - target), {lowerBound: 0, tolerance}, status);\n } else {\n sol = fmin((et) => Math.abs(moment(1, et) - 0.5), {lowerBound: 0}, status);\n }\n if (!(\"converged\" in status) || !status.converged) {\n if (!useLog) {\n // for very long t, Substack's Gamma results in a lot of NaNs? But this can be avoided by using logs:\n return _updateRecallSingle(prior, result, tnow, q0, rebalance, tback, {tolerance, useLog: !useLog});\n }\n\n console.error(status, {prior, result, tnow, q0, rebalance, tback});\n throw new Error(\"failed to converge\");\n }\n et = sol;\n tback = et * tnow;\n } else if (tback) {\n et = tback / tnow;\n } else {\n tback = t;\n et = tback / tnow;\n }\n\n const mean = useLog ? Math.exp(logmoment(1, et)) : moment(1, et);\n const secondMoment = useLog ? Math.exp(logmoment(2, et)) : moment(2, et);\n\n const variance = secondMoment - mean * mean;\n const [newAlpha, newBeta] = _meanVarToBeta(mean, variance);\n if (!(newAlpha > 0 && newBeta > 0 && isFinite(newAlpha) && isFinite(newBeta))) {\n // same as above: as a last-ditch effort to salvage this, try rerunning this function in the log-domain\n if (!useLog) {\n return _updateRecallSingle(prior, result, tnow, q0, rebalance, tback, {tolerance, useLog: !useLog});\n }\n\n throw new Error(\"newAlpha and newBeta must be finite and greater than zero\");\n }\n return [newAlpha, newBeta, tback];\n}\n\n/**\n Convert recall probability prior's raw parameters into a model object.\n\n `t` is your guess as to the half-life of any given fact, in units that you\n must be consistent with throughout your use of Ebisu.\n\n `alpha` and `beta` are the parameters of the Beta distribution that describe\n your beliefs about the recall probability of a fact `t` time units after that\n fact has been studied/reviewed/quizzed. If they are the same, `t` is a true\n half-life, and this is a recommended way to create a default model for all\n newly-learned facts. 
If `beta` is omitted, it is taken to be the same as\n `alpha`.\n */\nexport function defaultModel(t: number, a = 4.0, b = a): Model { return [a, b, t]; }\n\n/**\n When will memory decay to a given percentile?\n\n Given a memory `model` of the kind consumed by `predictRecall`,\n etc., and optionally a `percentile` (defaults to 0.5, the\n half-life), find the time it takes for memory to decay to\n `percentile`.\n */\nexport function modelToPercentileDecay(model: Model, percentile = 0.5, tolerance = 1e-4): number {\n if (percentile < 0 || percentile > 1) { throw new Error(\"percentiles must be between (0, 1) exclusive\"); }\n const [alpha, beta, t0] = model;\n const logBab = betaln(alpha, beta);\n const logPercentile = Math.log(percentile);\n function f(delta: number) {\n const logMean = betaln(alpha + delta, beta) - logBab;\n return Math.abs(logMean - logPercentile);\n }\n let status = {};\n const sol = fmin(f, {lowerBound: 0, tolerance}, status);\n if (!(\"converged\" in status) || !status.converged) { throw new Error(\"failed to converge\"); }\n return sol * t0;\n}\n\n/**\n Given any model, return a new model with the original's halflife scaled.\n Use this function to adjust the halflife of a model.\n\n Perhaps you want to see this flashcard far less, because you *really* know it.\n `newModel = rescaleHalflife(model, 5)` to shift its memory model out to five\n times the old halflife.\n\n Or if there's a flashcard that suddenly you want to review more frequently,\n perhaps because you've recently learned a confuser flashcard that interferes\n with your memory of the first, `newModel = rescaleHalflife(model, 0.1)` will\n reduce its halflife by a factor of one-tenth.\n\n Useful tip: the returned model will have matching \u03B1 = \u03B2, where `alpha, beta,\n newHalflife = newModel`. This happens because we first find the old model's\n halflife, then we time-shift its probability density to that halflife. 
The\n halflife is the time when recall probability is 0.5, which implies \u03B1 = \u03B2.\n That is the distribution this function returns, except at the *scaled*\n halflife.\n */\nexport function rescaleHalflife(prior: Model, scale = 1): Model {\n const [alpha, beta, t] = prior;\n const oldHalflife = modelToPercentileDecay(prior);\n const dt = oldHalflife / t;\n\n const logDenominator = betaln(alpha, beta);\n const logm2 = betaln(alpha + 2 * dt, beta) - logDenominator;\n const m2 = Math.exp(logm2);\n const newAlphaBeta = 1 / (8 * m2 - 2) - 0.5;\n if (newAlphaBeta <= 0) { throw new Error(\"non-positive alpha, beta encountered\"); }\n return [newAlphaBeta, newAlphaBeta, oldHalflife * scale];\n}\n", "export const fmin = require(\"minimize-golden-section-1d\") as (objective: (x: number) => number,\n options: Partial<Options>, status?: {}|Status) => number;\n\nexport interface Options {\n tolerance: number;\n lowerBound: number;\n upperBound: number;\n maxIterations: number;\n guess: number;\n initialIncrement: number;\n}\n\nexport interface Status {\n converged: boolean;\n iterations: number;\n minimum: number;\n argmin: number;\n}\n", "export const gamma = require(\"gamma\") as ((x: number) => number) & { log: (x: number) => number; };\nexport const gammaln = gamma.log;\n", "const exp = Math.exp;\nconst log = Math.log;\nconst sign = Math.sign;\nconst max = Math.max;\n\nexport function logsumexp(a: number[], b: number[]) {\n const a_max = max(...a);\n let s = 0;\n for (let i = a.length - 1; i >= 0; i--) { s += b[i] * exp(a[i] - a_max); }\n const sgn = sign(s);\n s *= sgn;\n const out = log(s) + a_max;\n return [out, sgn];\n}\n"],
"mappings": "6fAAA,IAAAA,EAAAC,EAAA,CAAAC,GAAAC,IAAA,cAEA,IAAIC,EAAY,GAAK,EAAI,KAAK,KAAK,CAAC,GAEpCD,EAAO,QAAUE,GAEjB,SAASA,GAAuBC,EAAGC,EAAIC,EAAIC,EAAKC,EAAeC,EAAQ,CAkBrE,QAjBIC,EAAIC,EACJC,EAAY,EACZC,EAAKP,EAAKJ,GAAaI,EAAKD,GAC5BS,EAAKT,EAAKH,GAAaI,EAAKD,GAE5BU,EAAKX,EAAES,CAAE,EACTG,EAAKZ,EAAEU,CAAE,EAKTG,EAAMb,EAAEC,CAAE,EACVa,EAAMd,EAAEE,CAAE,EACVa,EAAMd,EACNe,EAAMd,EAGH,EAAEM,EAAYJ,GAAiB,KAAK,IAAIF,EAAKD,CAAE,EAAIE,GACpDS,EAAKD,GACPT,EAAKQ,EACLA,EAAKD,EACLG,EAAKD,EACLF,EAAKP,EAAKJ,GAAaI,EAAKD,GAC5BU,EAAKX,EAAES,CAAE,IAETR,EAAKQ,EACLA,EAAKC,EACLC,EAAKC,EACLF,EAAKT,EAAKH,GAAaI,EAAKD,GAC5BW,EAAKZ,EAAEU,CAAE,GAcb,OAVAJ,EAAK,IAAOJ,EAAKD,GACjBM,EAAK,IAAOI,EAAKC,GAEbP,IACFA,EAAO,WAAaG,EACpBH,EAAO,OAASC,EAChBD,EAAO,QAAUE,EACjBF,EAAO,UAAY,IAGjB,MAAMO,CAAE,GAAK,MAAMD,CAAE,GAAKH,IAAcJ,GACtCC,IACFA,EAAO,UAAY,IAEd,KAGLQ,EAAMN,EACDQ,EACED,EAAMP,EACRS,EAEAV,CAEX,IChEA,IAAAW,EAAAC,EAAA,CAAAC,GAAAC,IAAA,cAEAA,EAAO,QAAUC,GAEjB,SAASA,GAAgBC,EAAQC,EAAGC,EAAIC,EAAIC,EAAMC,EAAMC,EAAS,CAG/D,IAAIC,EAAIC,EAAIC,EAAMC,EAAGC,EAAIC,EAAIC,EAK7B,IAJAH,EAAI,EACJC,EAAKT,EACLU,EAAKV,EACLO,EAAOD,EAAKD,EAAKN,EAAEC,CAAE,EACd,CAACW,GAAW,SAASV,CAAE,GAAK,CAAC,MAAMA,CAAE,GAmC1C,GAlCA,EAAEO,EACFG,EAAU,GAENL,GAAMC,IACRA,EAAOD,EACPG,EAAK,KAAK,IAAIP,EAAMO,EAAKR,CAAE,EAC3BK,EAAKP,EAAEU,CAAE,EACTE,EAAU,IAERN,GAAME,IACRA,EAAOF,EACPK,EAAK,KAAK,IAAIP,EAAMO,EAAKT,CAAE,EAC3BI,EAAKN,EAAEW,CAAE,EACTC,EAAU,IAIZJ,EAAO,KAAK,IAAIA,EAAMD,EAAID,CAAE,GAOvBC,IAAOC,GAAQE,IAAOP,GAAUG,IAAOE,GAAQG,IAAOP,KACzDQ,EAAU,IAOZV,GAAMO,EAAI,EAAI,EAAI,KAAK,IAAIA,EAAI,EAAG,EAE9B,CAAC,SAASP,CAAE,EACd,OAAAH,EAAO,CAAC,EAAI,KACZA,EAAO,CAAC,EAAI,IACLA,EAIX,OAAAA,EAAO,CAAC,EAAIW,EACZX,EAAO,CAAC,EAAIY,EACLZ,CACT,ICzDA,IAAAc,EAAAC,EAAA,CAAAC,GAAAC,IAAA,cAEA,IAAIC,GAAwB,IACxBC,GAAiB,IAEjBC,EAAS,CAAC,EAAG,CAAC,EAElBH,EAAO,QAAU,SAAmBI,EAAGC,EAASC,EAAQ,CACtDD,EAAUA,GAAW,CAAC,EACtB,IAAIE,EACAC,EAAYH,EAAQ,YAAc,OAAY,KAAOA,EAAQ,UAC7DI,EAAKJ,EAAQ,mBAAqB,OAAY,EAAIA,EAAQ,iBAC1DK,EAAOL,EAAQ,aAAe,OAAY,KAAYA,EAAQ,WAC9DM,EAAON,EAAQ,aAAe,OAAY,IAAWA,EAAQ,WAC7DO,EAAgBP,EAAQ,gBAAkB,OAAY,IAAMA,EAAQ,cASxE,GAPIC,IACFA,EAAO,WAAa,EACpBA,EAAO,OAAS,IAChBA,EAAO,QAAU,IACjBA,EAAO,UAAY,IAGjB,SAASK,CAAI,GAAK,SAASD,CAAI,EACjCP,EAAO,CAAC,EAAIO,EACZP,EAAO,CAAC,EAAIQ,UAGRN,EAAQ,QAAU,OAChBK,EAAO,KACTH,EAAKI,EAAO,IAAW,IAAOD,EAAOC,GAAQD,EAE7CH,EAAKI,EAAO,IAAWA,EAAO,EAGhCJ,EAAKF,EAAQ,MAGfH,GAAeC,EAAQC,EAAGG,EAAIE,EAAIC,EAAMC,EAAMC,CAAa,EAEvD,MAAMT,EAAO,CAAC,CAAC,GAAK,MAAMA,EAAO,CAAC,CAAC,EACrC,MAAO,KAIX,OAAOF,GAAsBG,EAAGD,EAAO,CAAC,EAAGA,EAAO,CAAC,EAAGK,EAAWI,EAAeN,CAAM,CACxF,IC9CA,IAAAO,GAAAC,EAAA,CAAAC,GAAAC,IAAA,CAGA,IAAIC,EAAI,EACJC,EAAI,CACJ,kBACA,kBACA,oBACA,kBACA,mBACA,mBACA,oBACA,qBACA,qBACJ,EAEIC,GAAO,IAAI,IACXC,EAAO,CACP,kBACA,kBACA,mBACA,mBACA,mBACA,qBACA,qBACA,sBACA,qBACA,uBACA,sBACA,sBACA,qBACA,uBACA,qBACJ,EAGA,SAASC,EAAQC,EAAG,CAEhB,GAAGA,EAAI,EAAG,OAAO,OAAO,KAAK,EAE7B,QADIC,EAAIH,EAAK,CAAC,EACNI,EAAIJ,EAAK,OAAS,EAAGI,EAAI,EAAG,EAAEA,EAAGD,GAAKH,EAAKI,CAAC,GAAKF,EAAIE,GAC7D,IAAI,EAAIF,EAAIH,GAAO,GACnB,MAAO,IAAG,KAAK,IAAI,EAAE,KAAK,EAAE,GAAGG,EAAE,IAAI,KAAK,IAAI,CAAC,EAAE,EAAE,KAAK,IAAIC,CAAC,EAAE,KAAK,IAAID,CAAC,CAC7E,CAEAN,EAAO,QAAU,SAASS,EAAOH,EAAG,CAChC,GAAIA,EAAI,GACJ,OAAO,KAAK,IAAM,KAAK,IAAI,KAAK,GAAKA,CAAC,EAAIG,EAAM,EAAIH,CAAC,GAEpD,GAAGA,EAAI,IAAK,OAAO,KAAK,IAAID,EAAQC,CAAC,CAAC,EAEvCA,GAAK,EAEL,QADIC,EAAIL,EAAE,CAAC,EACFM,EAAI,EAAGA,EAAIP,EAAI,EAAGO,IACvBD,GAAKL,EAAEM,CAAC,GAAKF,EAAIE,GAErB,IAAIE,EAAIJ,EAAIL,EAAI,GAEhB,OAAO,KAAK,KAAK,EAAI,KAAK,EAAE,EACtB,KAAK,IAAIS,EAAGJ,EAAI,EAAG,EACnB,KAAK,IAAI,CAACI,CAAC,EACXH,CAGd,EAEAP,EAAO,QAAQ,IAAMK,IClErB,IAAAM,GAAA,GAAAC,GAAAD,GAAA,mBAAAE,G
AAA,iBAAAC,GAAA,2BAAAC,GAAA,kBAAAC,GAAA,oBAAAC,GAAA,iBAAAC,KCAO,IAAMC,EAAO,ICAb,IAAMC,EAAQ,KACRC,EAAUD,EAAM,ICD7B,IAAME,GAAM,KAAK,IACXC,GAAM,KAAK,IACXC,GAAO,KAAK,KACZC,GAAM,KAAK,IAEV,SAASC,EAAUC,EAAaC,EAAa,CAClD,IAAMC,EAAQJ,GAAI,GAAGE,CAAC,EAClBG,EAAI,EACR,QAASC,EAAIJ,EAAE,OAAS,EAAGI,GAAK,EAAGA,IAAOD,GAAKF,EAAEG,CAAC,EAAIT,GAAIK,EAAEI,CAAC,EAAIF,CAAK,EACtE,IAAMG,EAAMR,GAAKM,CAAC,EAClB,OAAAA,GAAKE,EAEE,CADKT,GAAIO,CAAC,EAAID,EACRG,CAAG,CAClB,CHPA,IAAMC,GAAgB,IAAI,IAC1B,SAASC,EAAcC,EAAW,CAChC,IAAIC,EAAMH,GAAc,IAAIE,CAAC,EAC7B,OAAIC,IAAQ,SACZA,EAAMC,EAAQF,CAAC,EACfF,GAAc,IAAIE,EAAGC,CAAG,GACjBA,CACT,CACA,SAASE,GAAYC,EAAYC,EAAWC,EAAW,CACrD,OAAQJ,EAAQE,CAAE,EAAIF,EAAQE,EAAKE,CAAC,EAAIP,EAAcM,EAAIC,CAAC,EAAIP,EAAcM,CAAC,CAChF,CACA,IAAIE,EAAS,CAACF,EAAWC,IAAuBP,EAAcM,CAAC,EAAIN,EAAcO,CAAC,EAAIP,EAAcM,EAAIC,CAAC,EACrGE,EAAiB,CAACH,EAAWC,IAAuBJ,EAAQG,CAAC,EAAIH,EAAQI,CAAC,EAAIJ,EAAQG,EAAIC,CAAC,EAC3FG,EAAS,CAACJ,EAAWC,IAAwBI,EAAML,CAAC,EAAIK,EAAMJ,CAAC,EAAKI,EAAML,EAAIC,CAAC,EACnF,SAASK,GAAQ,EAAWC,EAAW,CAAE,MAAO,CAACL,EAAO,EAAI,EAAIK,EAAG,EAAIA,CAAC,EAAI,KAAK,IAAI,EAAI,CAAC,CAAG,CACtF,SAASC,GAAcC,EAA2B,CACvD,IAAMC,EAAO,CAAC,OAAAR,EAAQ,OAAAE,CAAM,EAC5B,OAAIK,EAAK,SAAUP,EAASO,EAAK,QAC7BA,EAAK,SAAUL,EAASK,EAAK,QAC1BC,CACT,CAEA,SAASC,GAAeC,EAAcC,EAAW,CAC/C,IAAIC,EAAOF,GAAQ,EAAIA,GAASC,EAAI,EAChCE,EAAQH,EAAOE,EACfE,GAAQ,EAAIJ,GAAQE,EACxB,MAAO,CAACC,EAAOC,CAAI,CACrB,CAmBO,SAASC,GAAcC,EAAcC,EAAcC,EAAQ,GAAe,CAC/E,GAAM,CAACL,EAAOC,EAAMK,CAAC,EAAIH,EACnBI,EAAKH,EAAOE,EACZE,EAAMzB,GAAYiB,EAAQO,EAAIP,EAAOC,CAAI,EAC/C,OAAOI,EAAQ,KAAK,IAAIG,CAAG,EAAIA,CACjC,CA8DO,SAASC,GACZN,EACAO,EACAC,EACAP,EACAQ,EACAC,EAAY,GACZC,EACA,CAAC,OAAAC,EAAS,GAAO,UAAAC,EAAY,IAAI,EAA6B,CAAC,EACtD,CACX,GAAI,EAAIN,GAAaA,EAAYC,GAASA,EAAQ,EAChD,MAAM,IAAI,MAAM,mEAAmE,EAGrF,GAAIA,IAAU,EAAK,OAAOM,EAAoBd,EAAOO,EAAWN,EAAMQ,EAAIC,EAAWC,EAAO,CAAC,OAAAC,EAAQ,UAAAC,CAAS,CAAC,EAE/G,GAAI,EAAEN,IAAc,KAAK,MAAMA,CAAS,GAAKC,IAAU,KAAK,MAAMA,CAAK,GACrE,MAAM,IAAI,MAAM,uCAAuC,EAGzD,GAAM,CAACX,EAAOC,EAAMK,CAAC,EAAIH,EACnBI,EAAKH,EAAOE,EACZY,EAAWP,EAAQD,EACnBS,EAAqB,CAAC,EAC5B,QAASC,EAAI,EAAGA,GAAKF,EAAUE,IAAOD,EAAS,KAAK5B,GAAQ2B,EAAUE,CAAC,CAAC,EAExE,SAASC,EAAsBC,EAAWC,EAAY,CACpD,IAAMC,EAAW,CAAC,EAClB,QAASJ,EAAI,EAAGA,GAAKF,EAAUE,IAC7BI,EAAS,KAAKL,EAASC,CAAC,EAAIjC,EAAOa,EAAQO,GAAMG,EAAYU,GAAKE,EAAIf,EAAKgB,EAAItB,CAAI,CAAC,EAEtF,IAAMwB,EAAQ,CAAC,EACf,QAASL,EAAI,EAAGA,GAAKF,EAAUE,IAAOK,EAAM,KAAK,KAAK,IAAI,GAAIL,CAAC,CAAC,EAChE,OAAOM,EAAUF,EAAUC,CAAK,EAAE,CAAC,CACrC,CAEA,IAAME,EAAiBN,EAAsB,EAAG,CAAC,EAE7CE,EACJ,GAAIV,EAAW,CACb,IAAMe,EAAS,KAAK,IAAI,EAAG,EACrBC,EAAUN,GAAeF,EAAsB,EAAGE,CAAE,EAAII,EAAiBC,EACzEE,EAAS,CAAC,EACVC,EAAMC,EAAMpD,GAAM,KAAK,IAAIiD,EAAOjD,CAAC,CAAC,EAAG,CAAC,UAAAoC,CAAS,EAAGc,CAAM,EAChE,GAAI,EAAE,cAAeA,IAAW,CAACA,EAAO,UACtC,cAAQ,IAAIA,CAAM,EACZ,IAAI,MAAM,8BAA8B,EAGhDP,EAAKQ,EACLjB,EAAQS,EAAKnB,CACf,CACIU,IAGFA,EAAQR,GACRiB,EAAKT,EAAQV,EAGf,IAAM6B,EAAUZ,EAAsB,EAAGE,CAAE,EAAII,EACzC9B,EAAO,KAAK,IAAIoC,CAAO,EACvBC,EAAK,KAAK,IAAIb,EAAsB,EAAGE,CAAE,EAAII,CAAc,EAEjE,GAAI9B,GAAQ,EAAK,MAAM,IAAI,MAAM,2BAA2B,EAC5D,GAAIqC,GAAM,EAAK,MAAM,IAAI,MAAM,iCAAiC,EAEhE,IAAMC,EAAS,KAAK,IAAI,EAAIF,CAAO,EAC7BG,EAAWF,EAAKC,EACtB,GAAIC,GAAY,EAAK,MAAM,IAAI,MAAM,+BAA+B,EACpE,GAAM,CAACC,EAAUC,CAAO,EAAI1C,GAAeC,EAAMuC,CAAQ,EACzD,MAAO,CAACC,EAAUC,EAASxB,CAAK,CAClC,CAEA,SAASG,EACLd,EACAoC,EACAnC,EACAQ,EACAC,EAAY,GACZC,EACA,CAAC,OAAAC,EAAS,GAAO,UAAAC,EAAY,IAAI,EAA6B,CAAC,EACtD,CACX,GAAI,EAAE,GAAKuB,GAAUA,GAAU,GAAM,MAAM,IAAI,MAAM,4CAA4C,EACjG,GAAM,CAACvC,EAAOC,EAAMK,CAAC,EAAIH,EAEnBqC,EAAID,EAAS,GACbE,EAAKD,EAAID,EAAS,EAAIA,EACxB3B,IAAO,SAAaA,EAAK,EAAI6B,GAEjC,IAAMlC,EAAKH,
EAAOE,EAEd,CAACoC,EAAGC,CAAC,EAAIH,EAAI,CAACC,EAAK7B,EAAIA,CAAE,EAAI,CAACA,EAAK6B,EAAI,EAAI7B,CAAE,EAE3CgC,EAAMF,EAAIrD,EAAOW,EAAQO,EAAIN,CAAI,EAAI0C,GAAKtD,EAAOW,EAAOC,CAAI,GAAK,GACjE4C,EACF9B,EAASW,EAAU,CAACtC,EAAeY,EAAQO,EAAIN,CAAI,EAAIb,EAAeY,EAAOC,CAAI,GAAK,IAAU,EAAG,CAACyC,EAAGC,CAAC,CAAC,EAAE,CAAC,EAAI,EAEpH,SAASG,EAAOC,EAAWxB,EAAY,CACrC,IAAIyB,EAAMN,EAAIrD,EAAOW,EAAQO,EAAKwC,EAAIxC,EAAKgB,EAAItB,CAAI,EACnD,OAAI0C,IAAM,IAAKK,GAAOL,EAAItD,EAAOW,EAAQ+C,EAAIxC,EAAKgB,EAAItB,CAAI,GACnD+C,EAAMJ,CACf,CACA,SAASK,EAAUF,EAAWxB,EAAY,CACxC,OAAIoB,IAAM,EAEJjB,EAAU,CAACtC,EAAeY,EAAQO,EAAKwC,EAAIxC,EAAKgB,EAAItB,CAAI,EAAGb,EAAeY,EAAQ+C,EAAIxC,EAAKgB,EAAItB,CAAI,CAAC,EAAG,CAACyC,EAAGC,CAAC,CAAC,EACtG,CAAC,EAAIE,EAEX,KAAK,IAAIH,CAAC,EAAItD,EAAeY,EAAQO,EAAKwC,EAAIxC,EAAKgB,EAAItB,CAAI,EAAI4C,CACxE,CAEA,IAAItB,EACJ,GAAIV,EAAW,CACb,IAAMiB,EAAS,CAAC,EACZC,EACJ,GAAIhB,EAAQ,CACV,IAAMa,EAAS,KAAK,IAAI,EAAG,EAC3BG,EAAMC,EAAMT,GAAO,KAAK,IAAI0B,EAAU,EAAG1B,CAAE,EAAIK,CAAM,EAAG,CAAC,WAAY,EAAG,UAAAZ,CAAS,EAAGc,CAAM,CAC5F,MACEC,EAAMC,EAAMT,GAAO,KAAK,IAAIuB,EAAO,EAAGvB,CAAE,EAAI,EAAG,EAAG,CAAC,WAAY,CAAC,EAAGO,CAAM,EAE3E,GAAI,EAAE,cAAeA,IAAW,CAACA,EAAO,UAAW,CACjD,GAAI,CAACf,EAEH,OAAOE,EAAoBd,EAAOoC,EAAQnC,EAAMQ,EAAIC,EAAWC,EAAO,CAAC,UAAAE,EAAW,OAAQ,CAACD,CAAM,CAAC,EAGpG,cAAQ,MAAMe,EAAQ,CAAC,MAAA3B,EAAO,OAAAoC,EAAQ,KAAAnC,EAAM,GAAAQ,EAAI,UAAAC,EAAW,MAAAC,CAAK,CAAC,EAC3D,IAAI,MAAM,oBAAoB,CACtC,CACAS,EAAKQ,EACLjB,EAAQS,EAAKnB,CACf,MAAWU,IAGTA,EAAQR,GACRiB,EAAKT,EAAQV,EAGf,IAAMP,EAAOkB,EAAS,KAAK,IAAIkC,EAAU,EAAG1B,CAAE,CAAC,EAAIuB,EAAO,EAAGvB,CAAE,EAGzDa,GAFerB,EAAS,KAAK,IAAIkC,EAAU,EAAG1B,CAAE,CAAC,EAAIuB,EAAO,EAAGvB,CAAE,GAEvC1B,EAAOA,EACjC,CAACwC,EAAUC,CAAO,EAAI1C,GAAeC,EAAMuC,CAAQ,EACzD,GAAI,EAAEC,EAAW,GAAKC,EAAU,GAAK,SAASD,CAAQ,GAAK,SAASC,CAAO,GAAI,CAE7E,GAAI,CAACvB,EACH,OAAOE,EAAoBd,EAAOoC,EAAQnC,EAAMQ,EAAIC,EAAWC,EAAO,CAAC,UAAAE,EAAW,OAAQ,CAACD,CAAM,CAAC,EAGpG,MAAM,IAAI,MAAM,2DAA2D,CAC7E,CACA,MAAO,CAACsB,EAAUC,EAASxB,CAAK,CAClC,CAeO,SAASoC,GAAa5C,EAAWrB,EAAI,EAAKC,EAAID,EAAU,CAAE,MAAO,CAACA,EAAGC,EAAGoB,CAAC,CAAG,CAU5E,SAAS6C,GAAuBC,EAAcC,EAAa,GAAKrC,EAAY,KAAc,CAC/F,GAAIqC,EAAa,GAAKA,EAAa,EAAK,MAAM,IAAI,MAAM,8CAA8C,EACtG,GAAM,CAACrD,EAAOC,EAAMqD,CAAE,EAAIF,EACpBG,EAASpE,EAAOa,EAAOC,CAAI,EAC3BuD,EAAgB,KAAK,IAAIH,CAAU,EACzC,SAASI,EAAEC,EAAe,CACxB,IAAMzB,EAAU9C,EAAOa,EAAQ0D,EAAOzD,CAAI,EAAIsD,EAC9C,OAAO,KAAK,IAAItB,EAAUuB,CAAa,CACzC,CACA,IAAI1B,EAAS,CAAC,EACRC,EAAMC,EAAKyB,EAAG,CAAC,WAAY,EAAG,UAAAzC,CAAS,EAAGc,CAAM,EACtD,GAAI,EAAE,cAAeA,IAAW,CAACA,EAAO,UAAa,MAAM,IAAI,MAAM,oBAAoB,EACzF,OAAOC,EAAMuB,CACf,CAsBO,SAASK,GAAgBxD,EAAcyD,EAAQ,EAAU,CAC9D,GAAM,CAAC5D,EAAOC,EAAMK,CAAC,EAAIH,EACnB0D,EAAcV,GAAuBhD,CAAK,EAC1CI,EAAKsD,EAAcvD,EAEnBqB,EAAiBxC,EAAOa,EAAOC,CAAI,EACnC6D,EAAQ3E,EAAOa,EAAQ,EAAIO,EAAIN,CAAI,EAAI0B,EAEvCoC,EAAe,GAAK,EADf,KAAK,IAAID,CAAK,EACU,GAAK,GACxC,GAAIC,GAAgB,EAAK,MAAM,IAAI,MAAM,sCAAsC,EAC/E,MAAO,CAACA,EAAcA,EAAcF,EAAcD,CAAK,CACzD",
"names": ["require_golden_section_minimize", "__commonJSMin", "exports", "module", "PHI_RATIO", "goldenSectionMinimize", "f", "xL", "xU", "tol", "maxIterations", "status", "xF", "fF", "iteration", "x1", "x2", "f1", "f2", "f10", "f20", "xL0", "xU0", "require_bracket_minimum", "__commonJSMin", "exports", "module", "bracketMinimum", "bounds", "f", "x0", "dx", "xMin", "xMax", "maxIter", "fU", "fL", "fMin", "n", "xL", "xU", "bounded", "require_minimize_golden_section_1d", "__commonJSMin", "exports", "module", "goldenSectionMinimize", "bracketMinimum", "bounds", "f", "options", "status", "x0", "tolerance", "dx", "xMin", "xMax", "maxIterations", "require_gamma", "__commonJSMin", "exports", "module", "g", "p", "g_ln", "p_ln", "lngamma", "z", "x", "i", "gamma", "t", "ebisu_exports", "__export", "customizeMath", "defaultModel", "modelToPercentileDecay", "predictRecall", "rescaleHalflife", "updateRecall", "fmin", "gamma", "gammaln", "exp", "log", "sign", "max", "logsumexp", "a", "b", "a_max", "s", "i", "sgn", "GAMMALN_CACHE", "gammalnCached", "x", "hit", "gammaln", "betalnRatio", "a1", "a", "b", "betaln", "betalnUncached", "betafn", "gamma", "binomln", "k", "customizeMath", "args", "orig", "_meanVarToBeta", "mean", "v", "tmp", "alpha", "beta", "predictRecall", "prior", "tnow", "exact", "t", "dt", "ret", "updateRecall", "successes", "total", "q0", "rebalance", "tback", "useLog", "tolerance", "_updateRecallSingle", "failures", "binomlns", "i", "unnormalizedLogMoment", "m", "et", "logProbs", "signs", "logsumexp", "logDenominator", "target", "rootfn", "status", "sol", "fmin", "logMean", "m2", "meanSq", "variance", "newAlpha", "newBeta", "result", "z", "q1", "c", "d", "den", "logden", "moment", "N", "num", "logmoment", "defaultModel", "modelToPercentileDecay", "model", "percentile", "t0", "logBab", "logPercentile", "f", "delta", "rescaleHalflife", "scale", "oldHalflife", "logm2", "newAlphaBeta"]
}
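The sourcesContent embedded in the map above documents Ebisu's public API: defaultModel, predictRecall, updateRecall, modelToPercentileDecay, and rescaleHalflife, all operating on a model of the form [alpha, beta, t]. Below is a minimal TypeScript usage sketch; the function names and signatures are taken from that embedded source, but the import path "./index" is an assumption (a local build of the bundled index.ts), not a published package name.

// Minimal usage sketch, assuming a local build of the index.ts embedded in
// the source map above. The import path is an assumption.
import { defaultModel, predictRecall, updateRecall, modelToPercentileDecay } from "./index";

// Prior on recall probability: Beta(4, 4) at a guessed halflife of 24 hours.
const model = defaultModel(24, 4);

// Expected recall probability 10 hours after the last review.
// Passing exact = true returns a linear probability rather than a log-probability.
const p = predictRecall(model, 10, true);

// Update the model after a single successful quiz 10 hours after the last review
// (successes = 1 out of total = 1).
const updated = updateRecall(model, 1, 1, 10);

// Time for the updated model's recall probability to decay to 0.5 (its halflife).
const halflife = modelToPercentileDecay(updated);

console.log({ p, updated, halflife });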