# Moments of the truncated log-normal can be computed directly from the moment generating
# function of the truncated normal:
# Let Y ~ LogNormal(μ, σ) truncated to (a, b). Then log(Y) ~ Normal(μ, σ) truncated
# to (log(a), log(b)), and E[Y^n] = E[(e^log(Y))^n] = E[e^(n log(Y))] = mgf(log(Y), n).

# Given `truncated(LogNormal(μ, σ), a, b)`, return `truncated(Normal(μ, σ), log(a), log(b))`
function _truncnorm(d::Truncated{<:LogNormal})
    μ, σ = params(d.untruncated)
    T = partype(d)
    a = d.lower === nothing ? nothing : log(T(d.lower))
    b = d.upper === nothing ? nothing : log(T(d.upper))
    return truncated(Normal(μ, σ), a, b)
end
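# For example, `_truncnorm(truncated(LogNormal(0.0, 1.0), 1.0, 10.0))` is
# `truncated(Normal(0.0, 1.0), 0.0, log(10.0))`; a missing bound (`nothing`, i.e. one-sided
# truncation) is passed through unchanged rather than log-transformed.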

mean(d::Truncated{<:LogNormal}) = mgf(_truncnorm(d), 1)

function var(d::Truncated{<:LogNormal})
    tn = _truncnorm(d)
    # Ensure the variance doesn't end up negative, which can occur due to numerical issues
    return max(mgf(tn, 2) - mgf(tn, 1)^2, 0)
end

function skewness(d::Truncated{<:LogNormal})
    tn = _truncnorm(d)
    m1 = mgf(tn, 1)
    m2 = mgf(tn, 2)
    m3 = mgf(tn, 3)
    sqm1 = m1^2
    v = m2 - sqm1
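    # The numerator below is the third central moment: E[(Y - m1)^3] = m3 - 3*m1*m2 + 2*m1^3.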
    return (m3 + m1 * (-3 * m2 + 2 * sqm1)) / (v * sqrt(v))
end

function kurtosis(d::Truncated{<:LogNormal})
    tn = _truncnorm(d)
    m1 = mgf(tn, 1)
    m2 = mgf(tn, 2)
    m3 = mgf(tn, 3)
    m4 = mgf(tn, 4)
    v = m2 - m1^2
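    # @horner expands the fourth central moment, E[(Y - m1)^4] = m4 - 4*m1*m3 + 6*m1^2*m2 - 3*m1^4;
    # dividing by v^2 and subtracting 3 gives the excess kurtosis.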
    return @horner(m1, m4, -4m3, 6m2, 0, -3) / v^2 - 3
end
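
# A quick sanity check (a sketch, not part of the package; the distribution and sample size
# here are arbitrary) is to compare the closed forms above against Monte Carlo estimates from
# the sample-based StatsBase methods:
#
#     using Distributions, StatsBase, Statistics
#     d = truncated(LogNormal(0.3, 0.8), 1.0, 5.0)
#     x = rand(d, 10^7)
#     mean(d) - mean(x)          # differences should be on the order of Monte Carlo error
#     var(d) - var(x)
#     skewness(d) - skewness(x)
#     kurtosis(d) - kurtosis(x)  # both report excess kurtosis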

# TODO: The entropy can be written "directly" as well, according to Mathematica, but
# the expression for it fills me with regret. There are some recognizable components,
# so a sufficiently motivated person could try to manually simplify it into something
# comprehensible. For reference, you can obtain the entropy with Mathematica like so:
#
#     d = TruncatedDistribution[{a, b}, LogNormalDistribution[m, s]];
#     Expectation[-LogLikelihood[d, {x}], Distributed[x, d],
#         Assumptions -> Element[x | m | s | a | b, Reals] && s > 0 && 0 < a < x < b]
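#
# In the meantime, a numerical value can be obtained by integrating -logpdf against the
# density (a sketch, assuming finite bounds and using QuadGK, which is not a dependency
# of this code):
#
#     using Distributions, QuadGK
#     d = truncated(LogNormal(0.3, 0.8), 1.0, 5.0)
#     h, err = quadgk(x -> -logpdf(d, x) * pdf(d, x), minimum(d), maximum(d))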