R code for fitting a piecewise NHPP model

The following R code implements a piecewise Weibull NHPP (nonhomogeneous Poisson process) model. In the context of repairable systems, a Weibull NHPP model is also known as the Crow-AMSAA model.

Technical details of the piecewise NHPP model are given in: Guo, H., Mettas, A., Sarakakis, G., and Niu, P. (2010), Piecewise NHPP Models with Maximum Likelihood Estimation for Repairable Systems, Reliability and Maintainability Symposium (RAMS), 2010 Proceedings.

See Examples 2 and 3 of this blog post for an alternative way of fitting a piecewise NHPP model.

Suggestions and/or questions? Please contact Stefan Gelissen (email: info at datall-analyse.nl).
See this page for an overview of all of Stefan’s R code blog posts.

R code

library(car)
library(bbmle)

#data reported by Guo et al. (2010)
#cumulative time
ct <- c(15.70, 29.39, 41.14, 56.47, 75.61, 98.83, 112.42, 125.61,
        129.39, 133.45, 138.94, 141.41, 143.67, 144.63, 144.95,
        145.16, 146.25, 146.70, 147.26, 148.15, 152.40)
#cumulative failures
cf <- 1:length(ct)

#plot data
plot(ct, cf, ylab="Cumulative failures", xlab="Cumulative time")
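#the piecewise Weibull NHPP model assumes a mean cumulative function (MCF)
#of lambda1*t^beta1 up to the change point cp, and lambda2*t^beta2 beyond it,
#where continuity of the MCF at cp implies lambda2=lambda1*cp^(beta1-beta2)
#a minimal sketch of this MCF (the name mcfPiecewise is ours, for illustration)
mcfPiecewise <- function(t, lambda1, beta1, beta2, cp) {
  lambda2 <- lambda1*cp^(beta1-beta2) #continuity at the change point
  ifelse(t<=cp, lambda1*t^beta1, lambda2*t^beta2)
}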



#detect change point

#log-likelihood function for piecewise NHPP model
#for a given change point cp, the MLEs of beta1, beta2, and lambda1 have
#closed forms (see Guo et al., 2010), so the function below is the profile
#log-likelihood of the change point
llikNHPPc <- function (cp, t, Time) {
  t1 <- t[which(t<=cp)] #failure times in segment 1
  t2 <- t[which(t>cp)]  #failure times in segment 2
  n1 <- length(t1)
  n2 <- length(t2)
  n <- length(t)
  #closed-form MLEs, given the change point cp
  b1 <- n1 / (n1*log(cp) - sum(log(t1)))
  b2 <- n2 / (n*log(Time) - n1*log(cp) - sum(log(t2)))
  l1 <- n / (cp^(b1-b2)*Time^b2)
  #profile log-likelihood
  n*log(l1) + n1*log(b1) + n2*log(b2) + n2*(b1-b2)*log(cp) +
    (b1-1)*sum(log(t1)) + (b2-1)*sum(log(t2)) - l1*(cp^(b1-b2))*Time^b2
}

#behavior of log-likelihood when varying the change point value
cps <- seq(50, 145, length.out=30)
ll <- sapply(cps, llikNHPPc, Time=max(ct), t=ct)
plot(cps,ll, type='o', xlab="Change Point", ylab="log-likelihood")
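#note: the profile log-likelihood can be discontinuous at the observed
#failure times (moving cp past an observation shifts that observation from
#one segment to the other), which is why the grid search above and the more
#robust optimizers below are useful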

#optimization of log-likelihood
#note: for the Guo et al. data, use 120 as a start value for
#the change point in the optimization algorithm
nhpp.mle <- optim(c(120), llikNHPPc,
                  method='BFGS', control=list(fnscale=-1),
                  Time=max(ct), t=ct)

#change point
nhpp.mle$par
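#check convergence (a value of 0 indicates that BFGS converged)
nhpp.mle$convergence
#maximized log-likelihood
nhpp.mle$value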

#alternative 1: automatic detection of change point
sv <- cps[which.max(ll)]
nhpp.mle2 <- optim(sv, llikNHPPc,
                   method='BFGS', control=list(fnscale=-1),
                   Time=max(ct), t=ct)

#change point
nhpp.mle2$par

#alternative 2: automatic detection of change point using Brent's algorithm
#Brent's algorithm is fairly robust for irregular/discontinuous functions;
#it searches the interval [lower, upper], so the start value is effectively ignored
nhpp.mle3 <- optim(c(50), llikNHPPc,
                   method='Brent', control=list(fnscale=-1),
                   lower=50, upper=145,
                   Time=max(ct), t=ct)

#change point
nhpp.mle3$par

#alternative 3: automatic detection of change point using particle swarm optimization
library(pso)
nhpp.mle4 <- psoptim(NA, llikNHPPc,
                     control=list(fnscale=-1, maxit=1000, s=10),
                     lower=50, upper=145,
                     Time=max(ct), t=ct)

#change point
nhpp.mle4$par
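#note: particle swarm optimization is stochastic; call set.seed() beforehand
#(e.g., set.seed(123)) for reproducible results
#the maximized log-likelihood is stored in nhpp.mle4$value and can be
#compared with the values found by the optimizers above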



#MLEs for parameters of piecewise NHPP model
failuredata <- data.frame(ct,cf)
CP <- nhpp.mle$par
MaxTime <- max(failuredata$ct)
segment1 <- failuredata[which(ct<=CP),]
n1 <- length(segment1$cf)
segment2 <- failuredata[which(ct>CP & ct<=MaxTime),]
n2 <- length(segment2$cf)
n <- length(failuredata$ct)

#MLE beta of segment 1
beta1 <- n1 / (n1*log(CP) - sum(log(segment1$ct)))

#MLE beta of segment 2
beta2 <- n2 / (n*log(max(ct)) - n1*log(CP) - sum(log(segment2$ct)) )

#MLE for lambda of segment 1
lambda1 <- n / (CP^(beta1-beta2)*max(ct)^beta2)

#MLE for lambda of segment 2 (follows from continuity of the MCF at the change point)
lambda2 <- lambda1*(CP^(beta1-beta2))

#summary of MLEs
round(c(beta1, beta2, lambda1, lambda2),4)
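#sanity check (a consequence of the formulas above): the fitted MCF is
#continuous at the change point, and the expected number of failures at
#MaxTime equals the observed total n
lambda1*CP^beta1 - lambda2*CP^beta2 #should be (numerically) zero
c(lambda2*MaxTime^beta2, n) #should be equal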


#an easy way to obtain the variance-covariance matrix is by optimizing
#the log-likelihood function of the piecewise NHPP model with mle2

#log-likelihood function of piecewise NHPP model
#note: the parameters are estimated on the log scale, which ensures that the
#limits of the Fisher matrix confidence bounds do not take negative values
#the failure times t and the end time TE are supplied via mle2's data argument
llikNHPP2 <- function (lambda1=log(lambda1), beta1=log(beta1),
                       beta2=log(beta2), cp=log(CP)) {
  l1 <- exp(lambda1)
  b1 <- exp(beta1)
  b2 <- exp(beta2)
  cp <- exp(cp)
  t1 <- t[which(t<=cp)]
  t2 <- t[which(t>cp)]
  n1 <- length(t1)
  n2 <- length(t2)
  n <- n1+n2
  #mle2 minimizes, so return the negative of the log-likelihood
  -(n*log(l1) + n1*log(b1) + n2*log(b2) + n2*(b1-b2)*log(cp) +
      (b1-1)*sum(log(t1)) + (b2-1)*sum(log(t2)) -
      l1*(cp^(b1-b2))*TE^b2)
}

#optimization of log-likelihood (the change point cp is held fixed at its MLE)
NHPP.mle <- mle2(minuslogl=llikNHPP2, optimizer="optim", method="BFGS",
                 data=list(TE=MaxTime, t=ct), fixed=list(cp=log(CP)))

#MLEs of parameters
exp(coef(NHPP.mle))
#log-likelihood of model
(loglikModel <- -NHPP.mle@min)
#variance-covariance matrix on the original scale
#(backtransformed from the log scale via the delta method)
vcovNHPP <- exp(coef(NHPP.mle)[1:3])%*%t(exp(coef(NHPP.mle)[1:3]))*
  vcov(NHPP.mle)
rownames(vcovNHPP) <- colnames(vcovNHPP)
round(vcovNHPP, 8)
#standard errors of parameters
(stErr <- sqrt(diag(vcovNHPP)))
#normal-approximation 95% confidence intervals (=quadratic approximation)
#these intervals are also called Fisher matrix confidence bounds
#note: these are the intervals as calculated by Guo et al. (2010)
round(exp(confint(NHPP.mle, level=0.95, method="quad")), 4)



#it is also possible to calculate likelihood based intervals for the model parameters
round(exp(confint(NHPP.mle, level=0.95)), 4)

#likelihood plot for beta1
#(likelihood plots for lambda1 and beta2 are constructed in exactly the same way)
profile.parm <- function(ps) {
  plkmu <- rep(0, length(ps))
  
  for (i in 1:length(ps)) {
    temp <- mle2(minuslogl=llikNHPP2, optimizer="optim", method="BFGS",
                 data=list(TE=MaxTime, t=ct),
                 fixed=list(beta1=ps[i], cp=log(CP)))
    plkmu[i] <- -temp@min
  }
  plkmu
}

#fixed values for beta1 (on the log scale, as used by llikNHPP2)
ps <- log(seq(0.4, 2.2, length.out=30))

#likelihood profile
ll <- profile.parm(ps)

#plot likelihood profile
plot(exp(ps), ll, type='l', xlab=bquote(beta["1"]), ylab="log-likelihood")
#include lower limit for log-likelihood values
limit <- loglikModel - .5*qchisq(.95, df=1)
abline(h=limit,col="blue",lty=2)
#95% confidence limits of likelihood based interval
#note: these limits were obtained with confint() above
abline(v=c(0.4769, 2.1465), col="red", lty=2)
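#the crossing points of the profile with the limit line can also be located
#numerically, for instance with approx() on each side of the maximum
#(a rough sketch, assuming the profile is unimodal)
iMax <- which.max(ll)
lowerCI <- approx(x=ll[1:iMax], y=exp(ps)[1:iMax], xout=limit)$y
upperCI <- approx(x=ll[iMax:length(ll)], y=exp(ps)[iMax:length(ll)], xout=limit)$y
c(lowerCI, upperCI)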



#test whether the betas of the two segments are significantly
#different from each other
#in other words, is beta2-beta1=0 or not?
parms <- c(lambda1, beta1, beta2)
names(parms) <- c("lambda1", "beta1", "beta2")
dif <- deltaMethod(object=parms, g="beta2-beta1", vcov.=vcovNHPP)
estdif <- dif$Estimate
SEdif <- dif$SE
#construct a 95% confidence interval for beta2-beta1 (=difference in betas)
#and check whether or not the value zero lies in the interval
estdif + c(1,-1)*qnorm(.05/2)*SEdif
#for the Guo et al. data, zero does not lie in the 95% confidence interval
#thus conclude that the two betas are significantly different from each other
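#as an alternative to the above Wald-type test, a likelihood ratio test may
#be used: under beta1=beta2 the continuity constraint gives lambda1=lambda2,
#so the piecewise model reduces to a single Crow-AMSAA model
#a sketch (beta0, lambda0, and loglik0 are our names; df=1 corresponds to the
#single extra beta parameter with the change point held fixed, and the usual
#chi-square asymptotics are only approximate since the change point was
#itself estimated from the data)
beta0 <- n / (n*log(MaxTime) - sum(log(ct))) #MLE beta, single model
lambda0 <- n / MaxTime^beta0 #MLE lambda, single model
loglik0 <- n*log(lambda0) + n*log(beta0) +
  (beta0-1)*sum(log(ct)) - lambda0*MaxTime^beta0
(LR <- 2*(loglikModel - loglik0)) #likelihood ratio statistic
pchisq(LR, df=1, lower.tail=FALSE) #approximate p-value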



#95% confidence interval for lambda2 (based on normal-approximation)

#apply delta method for obtaining variance of lambda2
parms <- c(lambda1, beta1, beta2)
names(parms) <- c("lambda1", "beta1", "beta2")
SElambda2 <- deltaMethod(object=parms, g="lambda1*CP^(beta1-beta2)",
                         vcov.=vcovNHPP, constants=c(CP=CP))$SE
#variance lambda2
(varLambda2 <- SElambda2^2)

#95% confidence interval (a lognormal-based interval, which keeps the lower limit positive)
lambda2*exp((c(1,-1)*qnorm(.05/2)*SElambda2)/lambda2)



#95% confidence intervals for cumulative failures (based on normal-approximation)

##Segment 1

#apply delta method for obtaining variance estimates of cumulative failures
ctsegment1 <- c(segment1$ct, CP)
SE1 <- vector("numeric", length(ctsegment1))

for (i in 1:length(ctsegment1)){
  t <- ctsegment1[i]
  SE1[i] <- deltaMethod(object=parms, g="lambda1*t^beta1", vcov.=vcovNHPP,
                        constants=c(t=t))$SE}
#predicted cumulative failures
cfpred1 <- lambda1*ctsegment1^beta1

#95% confidence intervals of cumulative failures
s1lci <- cfpred1*exp((qnorm(.025)*SE1)/cfpred1)
s1uci <- cfpred1*exp((qnorm(.975)*SE1)/cfpred1)


##Segment 2

#for segment 2 it is necessary to calculate the
#covariance between lambda2 and beta2

#apply delta method for obtaining covariance between lambda2 and beta2
#gradient of lambda2=lambda1*CP^(beta1-beta2) with respect to
#(lambda1, beta1, beta2), evaluated at the MLEs
dldl1 <- deriv(~ lambda1*CP^(beta1-beta2), "lambda1")
dldb1 <- deriv(~ lambda1*CP^(beta1-beta2), "beta1")
dldb2 <- deriv(~ lambda1*CP^(beta1-beta2), "beta2")
gl2 <- c(attr(eval(dldl1),"gradient"), attr(eval(dldb1),"gradient"),
         attr(eval(dldb2),"gradient"))
#gradient of beta2 with respect to (lambda1, beta1, beta2)
gb2 <- c(0,0,1)
#covariance between lambda2 and beta2
(covLambda2Beta2 <- t(gl2)%*%vcovNHPP%*%gb2)
#covariance matrix lambda2 and beta2
vcovl2b2 <- matrix(nrow=2,ncol=2)
colnames(vcovl2b2) <- rownames(vcovl2b2) <- c("lambda2", "beta2")
vcovl2b2[1,1] <- varLambda2
vcovl2b2[1,2] <- vcovl2b2[2,1] <- covLambda2Beta2
vcovl2b2[2,2] <- vcovNHPP[3,3]
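#check: this covariance matrix should be symmetric and positive definite
eigen(vcovl2b2)$values #both eigenvalues should be positive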

#apply delta method for obtaining variance estimates of cumulative failures
ctsegment2 <- c(CP, segment2$ct)
SE2 <- vector("numeric", length(ctsegment2))

parms2 <- c(lambda2, beta2)
names(parms2) <- c("lambda2", "beta2")

for (i in 1:length(ctsegment2)){
  t <- ctsegment2[i]
  SE2[i] <- deltaMethod(object=parms2, g="lambda2*t^beta2", vcov.=vcovl2b2,
                        constants=c(t=t))$SE}

#predicted cumulative failures
cfpred2 <- lambda2*ctsegment2^beta2

#95% confidence intervals of cumulative failures
s2lci <- cfpred2*exp((qnorm(.025)*SE2)/cfpred2)
s2uci <- cfpred2*exp((qnorm(.975)*SE2)/cfpred2)



#plot data with predicted values and 95% confidence intervals
plot(ct, cf, ylim=c(0,30),
     ylab="Cumulative failures", xlab="Cumulative time")
#predicted values
lines(x=ctsegment1, y=cfpred1, col="blue", lwd=2)
lines(x=ctsegment2, y=cfpred2, col="blue", lwd=2)
#95% confidence intervals segment 1
lines(x=ctsegment1, y=s1lci, col="blue", lty=2)
lines(x=ctsegment1, y=s1uci, col="blue", lty=2)
#95% confidence intervals segment 2
lines(x=ctsegment2, y=s2lci, col="blue", lty=2)
lines(x=ctsegment2, y=s2uci, col="blue", lty=2)
#include change point
abline(v=CP, col="red", lty=2)
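#optionally, add a legend (colors and line types match the plot above)
legend("topleft", legend=c("observed", "predicted", "95% CI", "change point"),
       pch=c(1, NA, NA, NA), lty=c(NA, 1, 2, 2), lwd=c(NA, 2, 1, 1),
       col=c("black", "blue", "blue", "red"), bty="n")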