############################################################
## Monte Carlo experiments with running-mean plots.
##
## Each experiment draws y[1], ..., y[n] and tracks
##   estint[k] = mean(y[1:k])                       (running estimate)
##   esterr[k] = sqrt(cumsum((y - estint)^2))[k]/k  (running std. error)
## plotting estint with approximate 95% bands estint +/- 2*esterr.
############################################################

##### 1. Estimation of pi by Monte Carlo integration ------------------
## pi = E[4*sqrt(1 - U^2)] for U ~ Uniform(-1, 1): the factor 4 is the
## quarter-circle area scaled by the interval width.
set.seed(1)
n <- 1e4
x <- runif(n, -1, 1)
h <- function(x) 4 * sqrt(1 - x^2)
y <- h(x)
estint <- cumsum(y) / seq_len(n)
esterr <- sqrt(cumsum((y - estint)^2)) / seq_len(n)
plot(estint, type = "l", ylim = c(2.5, 3.8))
## approximate 95% confidence band
lines(estint + 2 * esterr, lty = 2, col = "darkgrey")
lines(estint - 2 * esterr, lty = 2, col = "darkgrey")
abline(h = pi, col = 2)  # true value of pi

##### 2. Estimation of the mean of a Student-t distribution -----------

## Draw four independent running-mean plots for a t(df) sample of size
## n.  The green curve is sqrt(k)*esterr[k], the running estimate of
## the standard deviation: it stabilises only when the variance is
## finite (df > 2), i.e. only when the CLT justifies the bands.
##
## df   : degrees of freedom of the t distribution
## ylim : y-axis limits for the four panels
## n    : sample size per panel (default 1e4)
estimate.and.plot <- function(df, ylim, n = 1e4) {
  set.seed(1)  # reproducible panels
  op <- par(mfrow = c(2, 2), mar = rep(1, 4))
  on.exit(par(op), add = TRUE)  # restore graphics state even on error
  for (it in 1:4) {
    x <- rt(n, df = df)
    y <- x  # h(x) = x: estimating E[X] = 0
    estint <- cumsum(y) / seq_len(n)
    esterr <- sqrt(cumsum((y - estint)^2)) / seq_len(n)
    plot(estint, type = "l", ylim = ylim)
    ## approximate 95% confidence band
    lines(estint + 2 * esterr, lty = 2, col = "darkgrey")
    lines(estint - 2 * esterr, lty = 2, col = "darkgrey")
    ## running sqrt(variance) estimate -- converges iff the CLT holds
    lines(sqrt(seq_len(n)) * esterr, lty = 1, col = "green")
    abline(h = 0, col = 2)  # true mean
  }
}

## finite variance (df = 3); upper ylim of 2 leaves room for sqrt(var)
estimate.and.plot(df = 3, ylim = c(-1, 2))
## limiting case (df = 2): variance is infinite
estimate.and.plot(df = 2, ylim = c(-1, 4))
## infinite variance (df = 1.5)
estimate.and.plot(df = 1.5, ylim = c(-2, 8))

##### 3. Tail probability of a Cauchy: P(X > cutoff) ------------------
cutoff <- 2
true.value <- 1 - pcauchy(cutoff)  # exact tail probability
n <- 1e4
ylim <- true.value + c(-1e-2, 1e-2)

## (a) Naive Monte Carlo: mean of the indicator 1{X > cutoff} for
##     X ~ Cauchy.  High relative variance for a tail event.
set.seed(1)
x <- rcauchy(n)
h <- function(x) x > cutoff
y <- h(x)
estint <- cumsum(y) / seq_len(n)
esterr <- sqrt(cumsum((y - estint)^2)) / seq_len(n)
plot(estint, type = "l", ylim = ylim)
lines(estint + 2 * esterr, lty = 2, col = "darkgrey")
lines(estint - 2 * esterr, lty = 2, col = "darkgrey")
abline(h = true.value, col = 2)  # true tail probability
print(sum(y == 1))  # how many draws actually fell in the tail

## (b) Using symmetry: P(X > 2) = 1/2 - P(0 < X < 2), and
##     P(0 < X < 2) = E[2 * dcauchy(U)] for U ~ Uniform(0, 2)
##     (the 2 in h is the interval width).
set.seed(1)
x <- runif(n, 0, 2)
h <- function(x) 2 / (pi * (1 + x^2))
y <- h(x)
estint <- cumsum(y) / seq_len(n)
esterr <- sqrt(cumsum((y - estint)^2)) / seq_len(n)
plot(0.5 - estint, type = "l", ylim = ylim)
lines(0.5 - (estint + 2 * esterr), lty = 2, col = "darkgrey")
lines(0.5 - (estint - 2 * esterr), lty = 2, col = "darkgrey")
abline(h = true.value, col = 2)  # true tail probability
print(sum(y == 1))  # always 0 here: y is continuous, kept for comparison

## (c) Change of variables u = 1/x:
##     P(X > 2) = integral over (0, 1/2) of 1/(pi*(1 + u^2)) du,
##     estimated with U ~ Uniform(0, 0.5); the extra 1/2 in h absorbs
##     the interval width.  Bounded integrand => smallest variance.
set.seed(1)
x <- runif(n, 0, 0.5)
h <- function(x) 1 / (2 * pi * (1 + x^2))
y <- h(x)
estint <- cumsum(y) / seq_len(n)
esterr <- sqrt(cumsum((y - estint)^2)) / seq_len(n)
plot(estint, type = "l", ylim = ylim)
lines(estint + 2 * esterr, lty = 2, col = "darkgrey")
lines(estint - 2 * esterr, lty = 2, col = "darkgrey")
abline(h = true.value, col = 2)  # true tail probability
print(sum(y == 1))  # always 0 here: y is continuous, kept for comparison

######## The three estimators shown simultaneously --------------------
set.seed(1)
## naive method (black)
x <- rcauchy(n)
y <- x > cutoff
estint <- cumsum(y) / seq_len(n)
plot(estint, type = "l", ylim = ylim)
abline(h = true.value, col = 2)  # true tail probability
## symmetry (blue)
x <- runif(n, 0, 2)
y <- 2 / (pi * (1 + x^2))
estint <- cumsum(y) / seq_len(n)
lines(0.5 - estint, col = "blue")
## change of variables (green)
x <- runif(n, 0, 0.5)
y <- 1 / (2 * pi * (1 + x^2))
estint <- cumsum(y) / seq_len(n)
lines(estint, col = 3)

##### 4. Importance sampling: Gaussian tail P(X > 4.5) ----------------
## Naive indicator estimate (black, almost surely all zeros) versus
## importance sampling with a shifted Exponential proposal
## Y = 4.5 + Exp(1) (blue), with weights dnorm(Y)/dexp(Y - 4.5).
true.val <- pnorm(-4.5)  # = P(X > 4.5) by symmetry
ylim <- true.val + c(-1e-5, 1e-5)
n <- 1e4
x <- rnorm(n)
hx <- (x > 4.5)
y <- rexp(n) + 4.5          # proposal draws, support (4.5, Inf)
wi <- dnorm(y) / dexp(y - 4.5)  # importance weights
plot(cumsum(hx) / seq_len(n), type = "l", ylim = ylim)
lines(cumsum(wi) / seq_len(n), col = "blue")
abline(h = true.val, col = 2)  # true tail probability

##### 5. Importance sampling: Cauchy tail P(X > 4.5) ------------------
## The same proposal fails for a Cauchy target: the Exponential tail is
## lighter than the Cauchy tail, so the weights have infinite variance
## and the blue curve does not settle.
true.val <- pcauchy(-4.5)  # = P(X > 4.5) by symmetry
ylim <- true.val + c(-1e-1, 1e-1)
op <- par(mfrow = c(2, 2), mar = rep(1, 4))
for (it in 1:4) {
  n <- 1e3
  x <- rcauchy(n)
  hx <- (x > 4.5)
  y <- rexp(n) + 4.5
  wi <- dcauchy(y) / dexp(y - 4.5)
  plot(cumsum(hx) / seq_len(n), type = "l", ylim = ylim)
  lines(cumsum(wi) / seq_len(n), col = "blue")
  abline(h = true.val, col = 2)  # true tail probability
}
par(op)

## The weight function dcauchy(x)/dexp(x - 4.5) is unbounded in x,
## confirming why the estimator above fails.
x <- 1:30
plot(dcauchy(x) / dexp(x - 4.5))