# these helper functions rely on the bbmle package (mle2(), profile(), confint(), AICtab())
library(bbmle)

## mle.random() ## will repeat mle2() from different starting conditions and report the best fit
# ll and guess are the negative log-likelihood function and starting-value list passed to mle2()
# lower.guess and upper.guess define the range from which random starting conditions are drawn
# n is the number of times to select random starting conditions
mle.random <- function (ll, guess, lower.guess = rep(-1,length(guess)),
                        upper.guess = rep(1,length(guess)), n = 100 ) {
  random.results <- NULL;
  no.vars <- length(guess);
  for(i in 1:n) {
    for(j in 1:no.vars) {
      guess[j] = runif(1,lower.guess[j],upper.guess[j])
    }
    fit <- mle2(ll, start=guess, method="Nelder-Mead");
    random.results <- rbind(c(-logLik(fit),coef(fit)),random.results)
  }
  # refit from the best of the random starts and return that fit
  min.result <- which.min(random.results[,1]);
  ran <- random.results[min.result,1:(no.vars+1)];
  guess <- as.list(ran[2:length(ran)]);
  fit <- mle2(ll, start=guess, method="Nelder-Mead");
  fit
}

##example of use
#ll <- function (m1,s)
#  -sum(dnorm(bristol$infection,m1,s,log = TRUE))
#guess <- list(m1 = 0.3, s = 0.1)
#fit <- mle2(ll, start=guess, method="Nelder-Mead")
#summary(fit)
#fit <- mle.random(ll,guess,c(-3,0),c(3,10),100)
#summary(fit)
#
#current problem: the Nelder-Mead method in mle2() and optim() does not allow lower and upper constraints;
#consequently, a number of warnings are produced when using this function

# profpic() plots the negative log-likelihood profile for one parameter, with confidence limits
# modified from code originally by Ben Bolker
#
#usage:
#
#prof <- profile(fit.saturating)
#profpic(prof,"h",best=fit.saturating)
profpic <- function(prof,param,trueval,best,which,alpha=c(0.95,0.99),
                    legend=FALSE,legpos=NULL,ylab="negative log likelihood",...) {
  prof1 = prof@profile[[param]]
  nll = (prof1$z)^2/2+prof@summary@m2logL/2
  plot(prof1$par.vals[,param],nll,type="l",xlab=param,ylab=ylab,...)
  abline(h=-logLik(best))
  abline(v=coef(best)[param],lwd=2)
  if (!missing(trueval)) abline(v=trueval,lwd=2,lty=2)
  nalpha = length(alpha)
  for (i in seq(along=alpha)) {
    crit <- qchisq(alpha[i],1)/2
    abline(h=-logLik(best)+crit,lty=i+1)
    conflim <- confint(prof,parm=param,level=alpha[i])
    abline(v=conflim,lty=i+1)
  }
  if (legend) {
    # corner.loc() comes from the emdbook package
    if (is.null(legpos)) legpos <- corner.loc(1,1,xoff=0.2)
    legend(legpos$x,legpos$y,
           c(paste("alpha=",alpha,sep=""), "true", "observed"),
           lty=c(1:nalpha,2,1),
           lwd=c(rep(1,nalpha),2,2),bg="white")
  }
}

# The functions below this point are no longer used. They have been replaced
# by functions in the bbmle package such as AICtab().
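##a minimal sketch of the bbmle replacement mentioned above, assuming two fitted
##mle2 objects named fit.A and fit.B (hypothetical names) already exist;
##AICtab() builds an AIC comparison table directly from the fits, and
##AICctab() applies the small-sample (AICc) correction given nobs, the number of data points
#AICtab(fit.A, fit.B, weights = TRUE, delta = TRUE)
#AICctab(fit.A, fit.B, nobs = 100, weights = TRUE)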
## compare() ## will compare a model to other models using AIC-based metrics
# fit is the output from mle() or mle2()
# prev is the output from a previous compare() so that a list of models can be analyzed
# sorted = TRUE means the output will be sorted from best model to worst model
# default method is "AIC", but "AICc" can also be selected, in which case you will need to supply
# the number of data points n
# (a worked numeric illustration of the reported quantities follows the draft code below)
compare <- function (fit, name = "", prev = NULL, sorted = TRUE, method = "AIC", n = NULL) {
  comp <- prev[1:3];
  comp <- rbind(comp,list(-logLik(fit),length(coef(fit)),name));
  colnames(comp) <- c("negLogLik","params","model");
  comp <- data.frame(comp);
  comp$negLogLik <- as.numeric(comp$negLogLik);
  comp$params <- as.integer(comp$params);
  comp <- cbind(comp,AIC=2*comp$negLogLik + 2*comp$params);
  if(method == "AIC") {
    comp <- cbind(comp,delta=comp$AIC-min(comp$AIC));
  } else if(method == "AICc") {
    if(is.null(n)) {
      stop("For AICc, need to specify the sample size n")
    }
    comp <- cbind(comp,AICc=comp$AIC+2*comp$params*(comp$params+1)/(n-comp$params-1));
    comp <- cbind(comp,delta=comp$AICc-min(comp$AICc));
  }
  comp <- cbind(comp,akaike.weights=exp(-0.5*comp$delta)/sum(exp(-0.5*comp$delta)));
  comp <- cbind(comp,evid.ratio=exp(0.5*comp$delta));
  if(sorted && method == "AIC") {
    comp <- comp[sort.list(comp$AIC),];
  } else if(sorted && method == "AICc") {
    comp <- comp[sort.list(comp$AICc),];
  }
  row.names(comp) <- 1:dim(comp)[1]   # re-number rows so indices are correct for use in compare.likRatio()
  comp
}
#
##example of use
#comp <- compare(fit, name="A")
#comp <- compare(fit, name="B", prev = comp)
#compare(fit, name="C", prev = comp, method="AICc", n = nrow(bristol))
#
#future: this could be expanded to other indices like BIC, etc.
#
#to do this correctly, I think I need to define a method of making lists of mle objects
#so that the log-likelihood information is passed with the fit
#compare <- function (models, sorted = TRUE, method = "AIC", n = NULL) {
#  comp <- rbind(comp,list(-logLik(fit),length(coef(fit)),name));
#
#  colnames(comp) <- c("negLogLik","params","model");
#  comp <- data.frame(comp);
#  comp$negLogLik <- as.numeric(comp$negLogLik);
#  comp$params <- as.integer(comp$params);
#  comp <- cbind(comp,AIC=2*comp$negLogLik + 2*comp$params);
#  if(method == "AIC") {
#    comp <- cbind(comp,delta=comp$AIC-min(comp$AIC));
#  } else if(method == "AICc") {
#    if(is.null(n)) {
#      stop("For AICc, need to specify the sample size n")
#    }
#    comp <- cbind(comp,AICc=comp$AIC+2*comp$params*(comp$params+1)/(n-comp$params-1));
#    comp <- cbind(comp,delta=comp$AICc-min(comp$AICc));
#  }
#  comp <- cbind(comp,akaike.weights=exp(-0.5*comp$delta)/sum(exp(-0.5*comp$delta)));
#  comp <- cbind(comp,evid.ratio=exp(0.5*comp$delta));
#  if(sorted && method == "AIC") {
#    comp <- comp[sort.list(comp$AIC),];
#  } else if(sorted && method == "AICc") {
#    comp <- comp[sort.list(comp$AICc),];
#  }
#  row.names(comp) <- 1:dim(comp)[1]   # re-number rows so indices are correct for use in compare.likRatio()
#  comp
#}
#models <- NULL
#c(models,name=list(fit))
#
#future: this should be expanded to accept compare(fit1,fit2) or even compare( A=fit1, B=fit2 ),
#but perhaps it will have to be compare( list(A=fit1,B=fit2) ), which is what it should already do
#
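##worked illustration (hypothetical numbers) of the quantities compare() reports:
##for two models with negative log-likelihoods 100 and 103 and 3 vs 2 parameters,
##AIC = 2*negLogLik + 2*params, delta is measured from the lowest AIC,
##the Akaike weights are exp(-delta/2) normalised to sum to 1, and the
##evidence ratio exp(delta/2) compares each model against the best one
#negLogLik <- c(100, 103)
#params    <- c(3, 2)
#AIC       <- 2*negLogLik + 2*params                       # 206, 210
#delta     <- AIC - min(AIC)                               # 0, 4
#akaike.weights <- exp(-0.5*delta)/sum(exp(-0.5*delta))    # ~0.88, ~0.12
#evid.ratio     <- exp(0.5*delta)                          # 1, ~7.4
##with a sample size n, the small-sample correction used for method="AICc" is
#n    <- 30
#AICc <- AIC + 2*params*(params+1)/(n - params - 1)        # ~206.9, ~210.4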
## compare.likRatio() ## will do a likelihood ratio test between model i and model j
# note: this function does not check that the models are nested; it assumes the user has confirmed that they are
compare.likRatio <- function (compare.object, i, j) {
  comp <- compare.object;
  degfree <- abs(comp$params[i]-comp$params[j]);
  r <- abs(2*(comp$negLogLik[i]-comp$negLogLik[j]));
  1-pchisq(r, df=degfree)
}
##example of use
#compare.likRatio(comp,2,3)
#
#future: should be expanded to take model names for i and j
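##a minimal sketch (hypothetical fit names fit.null and fit.full) of the same
##likelihood ratio test computed directly from two nested mle2 fits, as a
##cross-check on compare.likRatio(); the test statistic is twice the difference
##in log-likelihoods, compared to a chi-squared distribution with degrees of
##freedom equal to the difference in parameter counts
#lr.stat <- 2*(as.numeric(logLik(fit.full)) - as.numeric(logLik(fit.null)))
#degfree <- length(coef(fit.full)) - length(coef(fit.null))
#1 - pchisq(lr.stat, df = degfree)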