# White noise: mixture of 3 normals
# N(m1, sig1^2) with prob. p, N(-m1, sig1^2) with prob. p, N(m0, sig0^2) otherwise
bruit <- function(n, m0=0, sig0=0.5, m1=3, sig1=1., p=0.05) {
  eps <- rep(0, n)
  for (i in 1:n) {
    u <- runif(1)
    if (u < p)                {eps[i] <- rnorm(1, mean=m1,  sd=sig1)}
    if ((p <= u) & (u < 2*p)) {eps[i] <- rnorm(1, mean=-m1, sd=sig1)}
    if (u >= 2*p)             {eps[i] <- rnorm(1, mean=m0,  sd=sig0)}
  }
  as.ts(eps)
}

# Function simulating a logistic STAR(1) process
star.sim <- function(n, a1=-0.95, a2=0.9, g0=5.0, burnin=100) {
  y <- rep(0, n+burnin)
  eps <- bruit(n+burnin)
  for (i in 2:(n+burnin)) {
    y[i] <- eps[i] + a1*y[i-1] + (a2-a1)*y[i-1]/(1+exp(-g0*y[i-1]))
  }
  as.ts(y[(burnin+1):(n+burnin)])  # drop the burn-in period
}

# Simulation
n <- 2000
set.seed(7)
a1 <- -0.95
a2 <- 0.9
g0 <- 5.
y <- star.sim(n, a1=a1, a2=a2, g0=g0)

# Plot the series and the regression function
op <- par(mfrow = c(3, 2))  # 3 x 2 pictures on one plot
plot(y, ylab=expression(y[t]), xlab="Time t")
title(expression(paste('(a) Simulation (', Y[t], ') of a STAR')))
abs <- seq(min(y), max(y), len=100)
ord <- (a1 + (a2-a1)/(1+exp(-g0*abs)))*abs
plot(min(y):max(y), min(y):max(y), type='n',
     ylab=expression(paste('E(', Y[t], '|', Y[t-1], '=y)')), xlab='y')
lines(abs, ord)
x <- y[2:(n-1)]
z <- y[3:n]
points(x, z, pch=20)
title(expression(paste('(b) Points (', Y[t-1], ',', Y[t], ') and regression function')))
par(op)

# Frequentist estimation of an LSTAR(1) model
library(tsDyn)
# vignette("tsDyn")
# Fit an LSTAR model to the time series y
fittedstar <- lstar(y, m=1, thDelay=0, control=list(maxit=3000))
names(fittedstar$coefficients) <- c("a0", "a1", "b0", "b1", "gamma", "c")
summary(fittedstar)

## Metropolis-Hastings algorithm
# Log posterior density, up to an additive constant, for the LSTAR(1) model
#   y[t] = (a0 + a1*y[t-1])*(1 - G) + (b0 + b1*y[t-1])*G + eps[t],
#   with G = 1/(1 + exp(-gamma*(y[t-1] - c))), eps[t] ~ N(0, sig2), c fixed at 0
# Priors: N(0,1) on a0, a1, b0, b1; Exp(1) on gamma; inverse-gamma(1,1) on sig2
logpost <- function(theta, y) {
  n <- length(y)
  a0 <- theta[1]; a1 <- theta[2]; b0 <- theta[3]; b1 <- theta[4]
  gamma <- theta[5]; c <- 0; sig2 <- theta[6]
  if (gamma <= 0) return(-Inf)
  if (sig2 <= 0) return(-Inf)
  logi <- 1/(1+exp(-gamma*(y[1:(n-1)]-c)))
  ychap <- (a0+a1*y[1:(n-1)])*(1-logi) + (b0+b1*y[1:(n-1)])*logi
  loglike <- -sum((y[2:n]-ychap)^2/(2*sig2)) - (n-1)*log(sig2)/2
  logprior <- -(a0^2+a1^2+b0^2+b1^2)/2 - gamma - 2*log(sig2) - 1/sig2
  return(loglike + logprior)
}

set.seed(42)
library(mcmc)
# Initial values: frequentist estimates and residual variance
theta.init0 <- as.vector(c(fittedstar$coefficients[1:5],
                           var(residuals(fittedstar), na.rm=TRUE)))

# First run
mcmcresults <- metrop(logpost, theta.init0, 3000, scale=0.02, y=y)
mcmcresults$accept
chaine <- ts(mcmcresults$batch)
dimnames(chaine)[[2]] <- c("a0", "a1", "b0", "b1", "gamma", "sig2")
plot(chaine, main="Markov chain simulated by Metropolis")
acf(chaine)
# The gamma component does not look stationary

# Second run, longer and with a larger step on gamma
mcmcresults <- metrop(mcmcresults, nbatch=30000,
                      scale=c(0.03, 0.03, 0.03, 0.03, 0.4, 0.02), y=y)
mcmcresults$accept
chaine <- ts(mcmcresults$batch)
dimnames(chaine)[[2]] <- c("a0", "a1", "b0", "b1", "gamma", "sig2")
plot(chaine, main="Markov chain simulated by Metropolis")
acf(chaine)
# No more obvious non-stationarity
apply(mcmcresults$batch, 2, mean)  # posterior means
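# Optional summary (a sketch): quantile-based 95% credible intervals and the
# posterior medians, read from the same chain; the columns of mcmcresults$batch
# follow the same order as 'chaine' above, i.e. (a0, a1, b0, b1, gamma, sig2)
ci <- apply(mcmcresults$batch, 2, quantile, probs=c(0.025, 0.5, 0.975))
colnames(ci) <- c("a0", "a1", "b0", "b1", "gamma", "sig2")
ci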
chaine <- as.data.frame(chaine)
# Remove any workspace variables (here a1, from the simulation step) that would
# mask the columns of 'chaine' once it is attached
rm(list = intersect(c("a0", "a1", "b0", "b1", "gamma", "sig2"), ls()))
attach(chaine)
plot(ts(a1))   # trace of the a1 component
str(chaine)
ls()

op <- par(mfrow = c(2, 2), ask=TRUE)  # 2 x 2 figures per page
hist(a0, col='light blue', probability=TRUE,
     main=expression(paste('Posterior distribution of ', a[0])))
lines(density(a0), col='red', lwd=3)
hist(a1, col='light blue', probability=TRUE,
     main=expression(paste('Posterior distribution of ', a[1])))
lines(density(a1), col='red', lwd=3)
hist(b0, col='light blue', probability=TRUE,
     main=expression(paste('Posterior distribution of ', b[0])))
lines(density(b0), col='red', lwd=3)
hist(b1, col='light blue', probability=TRUE,
     main=expression(paste('Posterior distribution of ', b[1])))
lines(density(b1), col='red', lwd=3)
hist(gamma, col='light blue', probability=TRUE,
     main=expression(paste('Posterior distribution of ', gamma)))
lines(density(gamma), col='red', lwd=3)
hist(sig2, col='light blue', probability=TRUE,
     main=expression(paste('Posterior distribution of ', sigma^2)))
lines(density(sig2), col='red', lwd=3)
par(op)  # restore the default graphical parameters
detach(chaine)
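# Optional check (a sketch): compare the frequentist lstar() point estimates
# with the posterior means of the chain; the first five fitted coefficients are
# assumed to correspond to (a0, a1, b0, b1, gamma) as renamed above
cbind(mle       = fittedstar$coefficients[1:5],
      post.mean = colMeans(mcmcresults$batch[, 1:5]))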