R code for fitting a three-parameter lognormal distribution

The following code fits the three-parameter lognormal distribution to (right) censored or complete (uncensored) data in R.

The R code implements a fitting strategy proposed by Jerry Lawless in his 2003 book Statistical models and methods for lifetime data (pp. 187-188). A similar strategy is suggested by Terry Therneau in this comment.

For some data sets Lawless’ fitting strategy yields an unbounded likelihood. See this blog post for more information on the unbounded likelihood of a 3-parameter lognormal model.

Lognormal 3 parameter

The 3-parameter lognormal distribution in the R code is fitted to data reported by Meeker and Escobar in their 1998 book Statistical methods for reliability data.

Suggestions and/or questions? Please contact Stefan Gelissen (email: info at datall-analyse.nl).
See this page for an overview of all of Stefan’s R code blog posts.

R code


#load the survival package (provides Surv, survfit, and survreg used below)
library(survival)

#Alloy data (see: Meeker & Escobar, Example 11.18, p. 279)
alloy <- read.table("http://www.datall-analyse.nl/blog_data/at7987.txt", header=TRUE)
#failure=1, censored=0
alloy$Cens <- as.numeric(alloy$Status)-1
#expand data: replicate each row Weight times
#(Weight is presumably a frequency count -- verify against the data file)
alloy2 <- alloy[rep(seq_len(nrow(alloy)), alloy$Weight),]

#observed times (kilocycles) and censoring indicator for the expanded data
times <- alloy2$Kilocycles
event <- alloy2$Cens

##probability plot
#check first whether fitting a 3-parameter lognormal distribution is appropriate
#that is, does a probability plot show curvature?
#more specifically, is there any concave behavior in the lower tail?
#if no such concavity is visible, then consider fitting a 2-parameter lognormal distribution
#sort the observed times and carry the censoring indicator along
timesSorted <- times[order(times)]
eventSorted <- event[order(times)]
#distinct failure times (censored observations get no plotting position)
timesi <- unique(timesSorted[which(eventSorted==1)])

#Kaplan-Meier estimate of the survival function
cumSurvRaw <- survfit(Surv(times, event)~ 1)
#one survival value per distinct failure time:
#rep expands each estimate by its event count, unique collapses ties again
cumSurv <- unique(rep(cumSurvRaw$surv, cumSurvRaw$n.event))
#cumulative failure probability (CDF estimate)
cumFail <- 1-cumSurv
#lagged CDF estimate, starting at 0
lagFail <- c(0, cumFail[-length(cumFail)])
#midpoint plotting positions: average of current and lagged CDF estimates
Prob <- .5*(cumFail+lagFail)
#linearize the CDF through the standard normal quantile function
yi <- qnorm(Prob)

#probability plot
plot(timesi, yi, log="x", xlab="Time", ylab="Linearized CDF")

##determine threshold of 3-parameter lognormal distribution

#profile log-likelihood for the threshold (=gamma): fix gamma, shift the
#data by it, fit a 2-parameter lognormal to the positive shifted times,
#and return that fit's log-likelihood
profile.t <- function(gammahat, times, event) {
  time <- times-gammahat
  fit <- survreg(Surv(time, event) ~ 1, dist="lognormal", subset=(time>0))
  fit$loglik[1]
}

#maximize the profile log-likelihood over gamma
#(the negative fnscale makes optim maximize instead of minimize;
#gamma must stay below the smallest failure time, hence the upper bound)
#note: for an estimate of gamma which coincides with the maximum in the profile likelihood
#plot (see below) you may sometimes have to vary the value of 1e+2 (e.g., set it to 1e+3)
gammaMLE <- optim(par=0, profile.t, method="L-BFGS-B",
                  control=list(fnscale=-1e+2),
                  lower=0, upper=min(times[event==1])-1e-5,
                  times=times, event=event)
gamma <- gammaMLE$par
names(gamma) <- "gamma"

#MLEs for mu and sigma
#with gamma fixed at its profile MLE, fit a 2-parameter lognormal to the
#shifted times; the subset drops observations with nonpositive shifted times
sgamma <- Surv(times-gamma, event)
musigma <- survreg(sgamma ~ 1, dist="lognormal", subset=(times-gamma>0))

#intercept of the intercept-only survreg fit = mu (location on the log scale)
mu <- coef(musigma)
names(mu) <- "mu"
#survreg scale parameter = sigma (scale on the log scale)
sigma <- musigma$scale
names(sigma) <- "sigma"

#note: MLEs are identical to those reported by Meeker & Escobar, p.279
c(mu, sigma, gamma)
#log-likelihood at the fitted parameters (outer parentheses print the value)
(loglikw <- musigma$loglik[1])

##normal-approximation confidence intervals for MLEs

#log-likelihood function of 3-parameter lognormal model
#theta: parameter vector (mu, sigma, gamma)
#y: observed times; d: event indicator (1=failure, 0=right censored)
#failures contribute the density, censored observations the survival
#function; observations with y<=gamma are dropped from the sum
lliklnormal <- function (theta, y, d) {
  mu <- theta[1]
  sigma <- theta[2]
  gamma <- theta[3]
  subset <- y-gamma>0
  ys <- y[subset]
  ds <- d[subset]
  sum(log((( 1/(sigma*(ys-gamma)) * dnorm( ((log(ys-gamma)-mu)/sigma) )) ^ds)*
            ( (1-pnorm( ((log(ys-gamma)-mu)/sigma) )) ^(1-ds) )))
}

#optimization of log-likelihood
#(fnscale=-1 turns optim's minimizer into a maximizer; the profile MLEs
#serve as start values; hessian=TRUE yields the observed information)
lognormal.mle <- optim(c(mu, sigma, gamma), lliklnormal,
                       method='BFGS', control=list(fnscale=-1),
                       y=times, d=event, hessian=TRUE)
#MLEs for mu, sigma, and gamma
c(lognormal.mle$par[1], lognormal.mle$par[2],lognormal.mle$par[3])
#assess whether the MLEs are identical to the start values
#(this is usually a reliable indicator of the model's stability)
c(mu, sigma, gamma)
#variance covariance matrix (inverse of the observed information)
vcovMLE <- solve(-1*lognormal.mle$hessian)
#standard errors
SEparms <- sqrt(diag(vcovMLE))

#normal-approximation 95% confidence intervals
#these intervals are also known as Fisher matrix interval bounds
#(qnorm(.05/2) is negative, so c(1,-1)* yields (lower, upper))
#95% confidence interval for mu
lognormal.mle$par[1] + c(1,-1)*qnorm(.05/2)*SEparms[1]
#95% confidence interval for sigma
#note: taking the log transformation of sigma guarantees that the confidence
#bounds always take positive values
#(delta method: se of log(sigma) = se(sigma)/sigma)
exp(log(lognormal.mle$par[2]) + c(1,-1)*qnorm(.05/2)*SEparms[2]/lognormal.mle$par[2])
#95% confidence interval for gamma
lognormal.mle$par[3] + c(1,-1)*qnorm(.05/2)*SEparms[3]

##likelihood based interval for gamma

#MLE for gamma
gamma
#fixed values of gamma: grid from 0 up to just below the smallest failure
#time (the shifted failure times must stay strictly positive)
gammas <- seq(0, min(times[event==1])-1e-2, length.out=100)
#likelihood profile
ll <- sapply(gammas, profile.t, times=times, event=event)

#plot profile likelihood
plot(gammas, ll, type='l', xlab=expression(gamma), ylab="log-likelihood")
#include lower limit for log-likelihood values
#(chi-square calibration of the likelihood ratio statistic, df=1)
limit <- loglikw - .5*qchisq(.95, df=1)
abline(h=limit, lty=2, col="blue")
#MLE of gamma
abline(v=gamma, lty=2, col="red")
#minimum of observed times
abline(v=min(times[event==1]), lty=2, col="darkgray")

#calculate the limits of the 95% confidence interval by hand
#root function: profile log-likelihood minus the chi-square cutoff;
#zero at the interval endpoints (uses the globals profile.t and limit)
limitlkh <- function(gammas, times, event) {
  profile.t(gammas, times, event) - limit
}

#search for a root below and above the MLE of gamma
lowerci <- uniroot(limitlkh, c(0, gamma), times=times, event=event)
upperci <- uniroot(limitlkh, c(gamma, min(times[event==1])-1e-2),
                   times=times, event=event)
#95% likelihood-based confidence interval for gamma
c(lowerci$root, upperci$root)

##predictions of 3-parameter lognormal model

#predict quantiles (including normal-approximation confidence intervals)
#vcov: variance-covariance matrix of (mu, sigma, gamma)
#p: proportion failing; alpha: 1 - confidence level
#returns a named vector: p, the quantile, its standard error, and the CI
lognormalQp <- function (vcov, mu, sigma, gamma, p, alpha=.05){
  Qp <- as.vector(exp(qnorm(p)*sigma + mu) + gamma)
  #take partial derivatives of the function q=exp(mu+qnorm(p)*sigma)+gamma
  dq.dmu <- exp(mu+qnorm(p)*sigma) #with respect to mu
  dq.dsigma <- qnorm(p)*exp(mu+qnorm(p)*sigma) #with respect to sigma
  dq.dgamma <- 1 #with respect to gamma
  dq.dtheta <- c(dq.dmu, dq.dsigma, dq.dgamma)
  #delta method: Var(q) = t(grad) %*% vcov %*% grad
  seQ <- sqrt(t(dq.dtheta)%*%vcov%*%dq.dtheta)
  #compute confidence interval (normal approximation on the quantile scale)
  lcl <- Qp-qnorm(1-alpha/2)*seQ; ucl <- Qp+qnorm(1-alpha/2)*seQ
  c(p=p, Quantile=Qp, std.err=seQ, lcl=lcl, ucl=ucl)
}

#specify proportions at which to predict the quantiles
ps <- seq(.001, .999, length.out=200)
#predict quantiles at specified proportions
#(vcov is the inverse of the negated hessian of the full log-likelihood)
Qps <- data.frame(t(sapply(ps, lognormalQp, vcov=solve(-1*lognormal.mle$hessian),
                           mu=mu, sigma=sigma, gamma=gamma, alpha=.05)))

#plot predictions
#(x-axis on log scale, starting at the threshold gamma)
plot(timesi, yi, type="p", xlab="Time", log="x", ylab="Linearized CDF",
     xlim=c(gamma, max(times)))
#fitted quantiles against the linearized proportions
lines(Qps$Quantile, qnorm(Qps$p), col="blue")
#confidence intervals
lines(Qps$lcl, qnorm(Qps$p), lty=2, col="red")
lines(Qps$ucl, qnorm(Qps$p), lty=2, col="red")
#include threshold (=gamma)
abline(v=gamma, col="darkgray", lty=2)

#predict cumulative proportions (including normal-approximation confidence intervals)
#vcov: variance-covariance matrix of (mu, sigma, gamma)
#x: time at which to evaluate the CDF; alpha: 1 - confidence level
#the CI is computed on the z (probit) scale and back-transformed with
#pnorm, which keeps the bounds inside [0, 1]
lognormalFx <- function (vcov, mu, sigma, gamma, x, alpha=.05){
  Fx <- as.vector(pnorm( (log(x-gamma)-mu ) / sigma))
  Zx <- qnorm(Fx)
  #take partial derivatives of the function z=(log(x-gamma)-mu)/sigma
  dz.dmu <- -1/sigma #with respect to mu
  dz.dsigma <- (-1/sigma)*Zx #with respect to sigma
  dz.dgamma <- (-1/sigma)*(1/(x-gamma)) #with respect to gamma
  dz.dtheta <- c(dz.dmu, dz.dsigma, dz.dgamma)
  #delta method: Var(z) = t(grad) %*% vcov %*% grad
  seZ <- sqrt(t(dz.dtheta)%*%vcov%*%dz.dtheta)
  #compute confidence interval on the z scale, then back-transform
  w <- qnorm(1-alpha/2)*seZ
  lcl <- (pnorm(Zx-w)); ucl <- (pnorm(Zx+w))
  c(x=x, Fhat=Fx, std.err=seZ, lcl=lcl, ucl=ucl)
}

#specify times at which to predict the cumulative proportions
#(grid spans the range of observed failure times)
xs <- seq(min(times[event==1]), max(times[event==1]), length.out=200)
#predict cumulative proportions at specified times
#(vcov is the inverse of the negated hessian of the full log-likelihood)
Fxs <- data.frame(t(sapply(xs, lognormalFx, vcov=solve(-1*lognormal.mle$hessian),
                           mu=mu, sigma=sigma, gamma=gamma, alpha=.05)))

#plot predictions
#(x-axis on log scale, starting at the threshold gamma)
plot(timesi, yi, type="p", xlab="Time", log="x", ylab="Linearized CDF",
     xlim=c(gamma, max(times)))
#fitted CDF on the linearized (probit) scale
lines(Fxs$x, qnorm(Fxs$Fhat), col="blue")
#confidence intervals
lines(Fxs$x, qnorm(Fxs$lcl), lty=2, col="red")
lines(Fxs$x, qnorm(Fxs$ucl), lty=2, col="red")
#include threshold (=gamma)
abline(v=gamma, col="darkgray", lty=2)