# =======================================================================
# ESTIMATION METHODS: MAXIMIZE LOG-LIKELIHOOD FUNCTION
# =======================================================================

# =================================================================
# GOLDEN SECTION SEARCH ALGORITHM IN R
# =================================================================
# Minimize a unimodal function f on [a, b] by golden section search.
# Returns the midpoint of the final bracketing interval once its
# width has shrunk below `tol`.
golden <- function(f, a, b, tol = 0.0000001) {
  ratio <- 2 / (sqrt(5) + 1)  # inverse golden ratio, 1/phi
  x1 <- b - ratio * (b - a)
  x2 <- a + ratio * (b - a)
  f1 <- f(x1)
  f2 <- f(x2)
  # Shrink the bracket, reusing one interior evaluation per iteration
  while (abs(b - a) > tol) {
    if (f2 > f1) {
      # minimum lies in [a, x2]
      b <- x2
      x2 <- x1
      f2 <- f1
      x1 <- b - ratio * (b - a)
      f1 <- f(x1)
    } else {
      # minimum lies in [x1, b]
      a <- x1
      x1 <- x2
      f1 <- f2
      x2 <- a + ratio * (b - a)
      f2 <- f(x2)
    }
  }
  (a + b) / 2
}
# =================================================================

# =================================================================
# Maximizer based on the golden search method
# =================================================================
# Find the maximizer of f on [a, b] by minimizing -f.
# BUG FIX: the original hard-coded tol = 0.0000001 in the inner call,
# silently ignoring the caller's `tol`; it is now forwarded.
goldenmax <- function(f, a, b, tol = 0.0000001) {
  golden(function(x) -f(x), a, b, tol = tol)
}
# =================================================================

# =================================================================
# MAXIMUM LIKELIHOOD EXAMPLES
# =================================================================
# EXAMPLE 1: One-dimensional problem: estimation of the mean
# ==========================================================

# Compute Normal Log-likelihood
# =============================
# simulate normal data
y <- rnorm(10000, mean = 50, sd = 5)
mean(y)
sqrt(var(y))
plot(density(y), type = "l")

# Log-likelihood of N(m, s2) evaluated at the sample `data`
# (variance s2 held fixed; only the mean m is estimated here)
normal.loglikelihood <- function(m, data = y, s2 = 25) {
  n <- length(data)
  -0.5 * n * log(2 * pi) - 0.5 * n * log(s2) - 0.5 * sum((data - m)^2) / s2
}

# run golden search (maximize the log-likelihood over m)
res <- goldenmax(normal.loglikelihood, 20, 100)
res
log_lik_1 <- normal.loglikelihood(res, y, 25)
log_lik_1

# One
# Dimensional Optimization with optimize()
resopt <- optimize(normal.loglikelihood, lower = 20, upper = 100, maximum = TRUE)
resopt

# =================================================================
# MAXIMUM LIKELIHOOD EXAMPLES
# =================================================================
# EXAMPLE 2: Two-dimensional problem:
# estimation of mean, and standard deviation
# =================================================================

# NEGATIVE log-likelihood of N(x[1], x[2]^2) at the sample `data`,
# with x = c(mean, standard deviation).  Negated so that minimizers
# such as optim() / optimx() can be applied directly.
normal.loglikelihood2 <- function(x, data = y) {
  n <- length(data)
  -(-0.5 * n * log(2 * pi) - 0.5 * n * log(x[2]^2) -
      0.5 * sum((data - x[1])^2) / (x[2]^2))
}

mean(y)
sqrt(var(y))

# =================================================================
# Routine Optim: General purpose optimization
# =================================================================
# optim() treats `par` as a plain vector (the original built a 2x1
# matrix, which optim coerces anyway), so use a vector directly
x0 <- c(5, 25)
x0

r1 <- optim(x0, normal.loglikelihood2, method = "Nelder-Mead")
r1$par
r1$value
r1$convergence

r2 <- optim(x0, normal.loglikelihood2, method = "BFGS")
r2$par
r2$value
r2$convergence

r3 <- optim(x0, normal.loglikelihood2, method = "CG")
r3$par
r3$value
r3$convergence

# NOTE: this is the NEGATIVE log-likelihood at the optimum
# (normal.loglikelihood2 returns -loglik); negate to report loglik
log_lik_2 <- normal.loglikelihood2(r1$par, y)
log_lik_2

# =================================================================
# Routine Optimx: General purpose optimization
# =================================================================
# install only when missing (the original unconditionally reinstalled
# on every run, requiring network access each time)
if (!requireNamespace("optimx", quietly = TRUE)) {
  install.packages("optimx")
}
library(optimx)

par <- c(5, 25)
r1 <- optimx(par, normal.loglikelihood2, method = "Nelder-Mead")
r1
# Conjugate gradients
r2 <- optimx(par, normal.loglikelihood2, method = "CG")
r2
# BFGS
r3 <- optimx(par, normal.loglikelihood2, method = "BFGS")
r3
# different methods compared in a single call
rall <- optimx(par, normal.loglikelihood2,
               method = c("Nelder-Mead", "CG", "BFGS", "nlm"))
rall
# =================================================================
# =================================================================
# =================================================================
# EXAMPLE 3 : RANDOM WALK WITH
# DRIFT
# y(t) = m + y(t-1) + e(t),  e(t) ~ N(0, sigma2)
# =================================================================
# =================================================================
# simulate values with parameters m = 0.05, sigma = 1

# set seed for reproducibility
set.seed(1010)

# sample size (renamed from `T`, which masks the TRUE shorthand)
n_obs <- 10000
# true parameter values
m <- 0.05
sigma <- 1

# A random walk with drift is the cumulative sum of iid N(m, sigma^2)
# increments.  rnorm(n_obs, m, sigma) consumes the same RNG stream as
# the original element-wise loop, so the simulated path (and the
# printed estimates below) are unchanged.
data1 <- cumsum(rnorm(n_obs, mean = m, sd = sigma))

# plot simulated data
plot(data1, type = "l")

# CLASSICAL APPROACH TO INFERENCE
# maximize log-likelihood or minimize -log-likelihood

# Negative conditional log-likelihood of the RW-with-drift model,
# with parameter = c(m, sigma) and y the observed series.
# The additive constant -(n/2)*log(2*pi) is omitted: it does not
# affect the location of the MLE.
negative_log_likRW <- function(parameter, y) {
  m <- parameter[1]
  sigma <- parameter[2]
  y_lagged <- y[1:(length(y) - 1)]
  yt <- y[2:length(y)]
  n <- length(yt)
  -(-(n / 2) * log(sigma^2) - sum((yt - m - y_lagged)^2) / (2 * sigma^2))
}

# help for optim
?optim

# ESTIMATE MODEL PARAMETERS USING OPTIM
# L-BFGS-B allows box constraints on the parameters; keep sigma
# strictly positive (the original lower bound of 0 permits sigma = 0,
# where log(sigma^2) = -Inf and L-BFGS-B fails on a non-finite value)
estimateRW <- optim(par = c(0.5, 0.3), fn = negative_log_likRW, y = data1,
                    lower = c(-Inf, 1e-8), upper = c(Inf, Inf),
                    method = "L-BFGS-B")
estimateRW$par
#[1] 0.03855965 0.99750386
estimateRW$convergence
#[1] 0

# fit a linear model as an OLS benchmark: regress y(t) on y(t-1);
# intercept ~ m and slope ~ 1 for a random walk with drift
xx <- data1[1:(length(data1) - 1)]
yy <- data1[2:length(data1)]
lm(yy ~ 1 + xx)
#Call:
#  lm(formula = yy ~ 1 + xx)
#Coefficients:
# (Intercept)           xx
#     0.04218      0.99998
# =================================================================
# =================================================================