Identification of ARMA processes

Last week, in the MAT8181 course, in order to identify the orders of an ARMA process, we saw the eacf method, and I mentioned the scan method, introduced in Tsay and Tiao (1985). The code below, which produces the output of the scan procedure, has been adapted from an old code by Steve Chen; I added a visualization of the p-values, with four colors, from dark red (p-value below 1%) to light blue (p-value above 10%), following the cut-offs used at the end of the function.

The procedure was described in the course, last Thursday,

arma.scan=function(z,ar.max=15,ma.max=15,alpha=0.01)
{
  # ym(z,t,m) returns the vector (z[t], z[t-1], ..., z[t-m])
  ym=function(z,t,m){return(z[t:(t-m)])}
  n=length(z)
  z=z - mean(z)
  cmax=ma.max + 1
  rmax=ar.max + 1
  corref=matrix(0,nrow=rmax,ncol=cmax)     # squared canonical correlations
  cmj.table=matrix(0,nrow=rmax,ncol=cmax)  # C(m,j) test statistics
  pv=matrix(0,nrow=rmax,ncol=cmax)         # chi-square(1) p-values
  mark=matrix(rep("X",(rmax)*(cmax)),nrow=rmax,ncol=cmax)
  Rnames=paste("AR",0:(ar.max),sep="-")
  Cnames=paste("MA",0:(ma.max),sep="-")
  rownames(corref)=Rnames
  colnames(corref)=Cnames
  rownames(cmj.table)=Rnames
  colnames(cmj.table)=Cnames
  rownames(pv)=Rnames
  colnames(pv)=Cnames
  rownames(mark)=Rnames
  colnames(mark)=Cnames
  for (m in 0:ar.max)
  {
   m1=m+1
   for (j in 0:ma.max)
   {
   j1=j+1 
    if (m == 0 && j != 0)  
    {
       # AR-0 row: under an MA(j-1) null the lag-j autocorrelation vanishes, so the
       # squared lag-j autocorrelation is stored in column j, i.e. cell (AR-0, MA-(j-1))
      racf=acf(z,plot=FALSE)$acf[1:(j+1)]    
      lamb=racf[j+1]^2    
      corref[m1,j]=round(lamb,4)
      dmj=1 + 2*sum(racf[1:j]^2)
      cmj=-1*(n-m-j)*log(1.0 - lamb/dmj)
      pvalue =pchisq(cmj,1,lower.tail=FALSE)
      pv[m1,j]=round(pvalue,4)
      cmj.table[m1,j]=round(cmj,4)
      mark[m1,j]=ifelse(pvalue > alpha,"O","X")    
    } 
     else if (m != 0 && j == 0) 
     {
       # MA-0 column: squared partial autocorrelation at lag m+1
       racf=pacf(z,plot=FALSE)$acf[1:(m+1)]
      lamb=racf[m+1]^2
      corref[m1,j1]=round(lamb,4)
      dmj = 1
      cmj=-1*(n-m-j)*log(1.0 - lamb/dmj)    
      pvalue =pchisq(cmj,1,lower.tail=FALSE)
      pv[m1,j1]=round(pvalue,4)
      cmj.table[m1,j1]=round(cmj,4)    
      mark[m1,j1]=ifelse(pvalue > alpha,"O","X")
    } 
     else
     {        
       # general case: moment matrices of two AR(m)-type lagged vectors, whose
       # smallest canonical correlation is then tested
       mat1=matrix(0,nrow=m1,ncol=m1)
       mat2=matrix(0,nrow=m1,ncol=m1) 
       mat3=matrix(0,nrow=m1,ncol=m1)
       mat4=matrix(0,nrow=m1,ncol=m1)     
      for (t in (j+m+2):n)
      {
         tj1=t-j-1
         ym1=ym(z,tj1,m)
         ym2=ym(z,t,m)    

         mat1=mat1 + as.matrix(ym1)%*%ym1    
         mat2=mat2 + as.matrix(ym1)%*%ym2    
         mat3=mat3 + as.matrix(ym2)%*%ym2    
         mat4=mat4 + as.matrix(ym2)%*%ym1                
      }  
       b1=solve(mat1)%*%mat2
       b2=solve(mat3)%*%mat4
       A=b2%*%b1   # smallest eigenvalue of A = squared canonical correlation
       eig <-eigen(A)
       eig.val <-eig$values
       eig.val=Re(eig.val)
       eig.len=length(eig.val)
       eig.vector=eig$vectors
       lamb=min(eig.val)
       eig.vector0=eig.vector[,which.min(eig.val)]
       eig.vector0 = eig.vector0/eig.vector0[1]   # normalize the first component to 1
       resid=numeric(n)   # residuals obtained by filtering z with that eigenvector
      for (t in (j+m+1):n)
      {
        z0=z[seq(t,t-m,-1)]      
        resid[t]=sum(z0 * eig.vector0)
      } 
      jm1=j + m + 1
      rx=Re(resid[jm1:n])
       racf=acf(rx,plot=FALSE)$acf[1:j]
       dmj=1 + 2*sum(racf^2)   # normalizing factor from the residual autocorrelations
      cmj=-1*(n-m-j)*log(1.0 - lamb/dmj)     
      pvalue =pchisq(cmj,df=1,lower.tail=FALSE)
      corref[m1,j1]=round(lamb,4)     
      pv[m1,j1]=round(pvalue,4)
      cmj.table[m1,j1]=round(cmj,4)    
      mark[m1,j1]=ifelse(pvalue > alpha,"O","X")
    }
   } 
  } 

  cat("\n\nSCAN: Smallest CANonical Correlation Method for ARIMA(p,d,q)\n\n")
  cat("Estimates of Squared Canonical Correlation \n\n")
  print(corref)
  cat("\n\nC(m,j)\n\n")
  print(cmj.table)
  cat("\n\nChi-Square(1) Test p-value\n\n")
  print(pv)
  cat("\nSCAN Matrix \n\n")
  print(mark)

plot(0:1,0:1,col="white",xlim=c(0,nrow(pv)-1),ylim=c(0,ncol(pv)-1),axes=FALSE,xlab="AR",ylab="MA")
axis(1); axis(2)
library(RColorBrewer)
CL=brewer.pal(6, "RdBu")[c(1,2,3,5)]   # dark red, salmon, pale pink, light blue
cpv=matrix(as.numeric(cut(as.vector(pv),c(-1,.01,.05,.1,2))),nrow(pv),ncol(pv))   # p-value bands: <1%, 1-5%, 5-10%, >10%
for(i in 1:nrow(pv)){
for(j in 1:ncol(pv)){
 polygon(c(i-1,i-1,i,i)-.5,c(j-1,j,j,j-1)-.5,
 col=CL[cpv[i,j]])
}}
}

Consider the following simulated time series,

> s=arima.sim(n=200,model=list(ar=c(0,0,0,.4,0,0,0,.5),ma=c(0,0,1))) 
> plot(s,type="l")
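Note that the simulated model here is an ARMA(8,3) process,

$$X_t=0.4\,X_{t-4}+0.5\,X_{t-8}+\varepsilon_t+\varepsilon_{t-3}$$

since arima.sim interprets the ar and ma vectors as the coefficients of lags 1, 2, 3, etc.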

The output is here

> arma.scan(s,6,6)

SCAN: Smallest CANonical Correlation Method for ARIMA(p,d,q)

Estimates of Squared Canonical Correlation 

       MA-0   MA-1   MA-2   MA-3   MA-4   MA-5   MA-6
AR-0 0.0614 0.0104 0.1862 0.3516 0.0971 0.0128 0.0000
AR-1 0.0302 0.0294 0.1501 0.0943 0.0855 0.0127 0.0385
AR-2 0.3070 0.2781 0.2140 0.0006 0.1589 0.1884 0.2243
AR-3 0.1627 0.0037 0.1927 0.2311 0.1379 0.0207 0.0376
AR-4 0.2087 0.3947 0.3653 0.3075 0.1502 0.1364 0.1013
AR-5 0.1677 0.1219 0.0110 0.0263 0.0332 0.0350 0.0044
AR-6 0.0114 0.0485 0.0561 0.0427 0.0009 0.0089 0.0308

C(m,j)

        MA-0    MA-1    MA-2    MA-3   MA-4   MA-5    MA-6
AR-0  4.1161  0.6585 12.0315 20.6512 4.5388 0.5620  0.0000
AR-1  6.1127  1.9499  9.9356  4.9145 4.7219 0.4642  1.9015
AR-2 72.6011 19.1679 14.3512  0.0337 7.9668 9.6479 11.4573
AR-3 34.9724  0.2386 10.1620 13.4082 6.7875 0.8725  1.4071
AR-4 45.8691 27.5070 19.1422 20.2835 7.3339 5.5374  3.5874
AR-5 35.7981  8.0498  0.6280  1.3543 1.8470 1.7930  0.2338
AR-6  2.2147  3.1466  3.5990  1.9904 0.0511 0.4816  1.6440

Chi-Square(1) Test p-value

       MA-0   MA-1   MA-2   MA-3   MA-4   MA-5   MA-6
AR-0 0.0425 0.4171 0.0005 0.0000 0.0331 0.4534 0.0000
AR-1 0.0134 0.1626 0.0016 0.0266 0.0298 0.4957 0.1679
AR-2 0.0000 0.0000 0.0002 0.8543 0.0048 0.0019 0.0007
AR-3 0.0000 0.6252 0.0014 0.0003 0.0092 0.3503 0.2355
AR-4 0.0000 0.0000 0.0000 0.0000 0.0068 0.0186 0.0582
AR-5 0.0000 0.0046 0.4281 0.2445 0.1741 0.1806 0.6287
AR-6 0.1367 0.0761 0.0578 0.1583 0.8212 0.4877 0.1998

SCAN Matrix 

     MA-0 MA-1 MA-2 MA-3 MA-4 MA-5 MA-6
AR-0 "O"  "O"  "X"  "X"  "O"  "O"  "X" 
AR-1 "O"  "O"  "X"  "O"  "O"  "O"  "O" 
AR-2 "X"  "X"  "X"  "O"  "X"  "X"  "X" 
AR-3 "X"  "O"  "X"  "X"  "X"  "O"  "O" 
AR-4 "X"  "X"  "X"  "X"  "X"  "O"  "O" 
AR-5 "X"  "X"  "O"  "O"  "O"  "O"  "O" 
AR-6 "O"  "O"  "O"  "O"  "O"  "O"  "O"

with the following graph. To read the SCAN matrix, one looks for the upper-left vertex of a (roughly) rectangular pattern of "O"s: that vertex suggests the orders $(p,q)$.

Of course, it is possible to ask for larger values,

> arma.scan(s,12,12)

The graph is now
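For comparison, the eacf method mentioned in the introduction is implemented in the TSA package; assuming it is installed, a similar O/X table is obtained with

> library(TSA)
> eacf(s,ar.max=6,ma.max=6)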

Voting Twice in France

On the Monkey Cage blog, Baptiste Coulmont (a.k.a. @coulmont) recently uploaded a post entitled “You can vote twice! The many political appeals of proxy votes in France”, coauthored with Joël Gombin (a.k.a. @joelgombin) and myself. The study was initially written in French, as mentioned in a previous post. Baptiste posted additional information on his blog (http://coulmont.com/blog/…) and I also wanted to post some lines of code, to mention a model that was not used in that study (more complex to analyze, but more realistic, and leading to the same conclusions). The econometric study is based on aggregated votes, so interpreting the results at the individual level runs the risk of an ecological fallacy.

  • Regression Model: Possible Explanatory Variables

The first idea was to model proxy votes using a binomial regression, per polling station, $Y_i\sim\mathcal{B}(n_i,p_i)$, where $Y_i$ denotes the number of proxy votes in station $i$, and $n_i$ denotes the number of registered voters. The proportion $p_i$ can then be a function of possible explanatory variables (on Baptiste’s blog there is additional information about the datasets, obtained from insee.fr and opendata.paris.fr)

> bt1=read.table("paris2007-pres-t1.csv",header=TRUE,sep=";")
> bt2=read.table("paris2007-pres-t2.csv",header=TRUE,sep=";")
> bv=read.table("paris-bv-insee-07.csv",header=TRUE,sep=";")
> bv$BV=bv$BVCOM
> baset1=merge(bt1,bv,by="BV")
> baset2=merge(bt2,bv,by="BV")
> baset1$LOGEMENT=baset1$PROPRIO+baset1$LOCNONHLM+baset1$LOCHLM+baset1$GRATUIT
> baset2$LOGEMENT=baset2$PROPRIO+baset2$LOCNONHLM+baset2$LOCHLM+baset2$GRATUIT

For instance, assume that $p_i$ is a function of the proportion of homeowners, denoted $x_i$, in the neighborhood of polling station $i$,

> variable="PROPRIO"
> reference="LOGEMENT"
> baset1$taux=baset1[,variable]/baset1[,reference]
> baset2$taux=baset2[,variable]/baset2[,reference]

We can consider a logistic regression,

$$\log\frac{p_i}{1-p_i}=\beta_0+\beta_1 x_i$$

or a logistic regression with splines, if we do not want to assume a linear effect,

$$\log\frac{p_i}{1-p_i}=h(x_i)$$

for some smooth function $h$. With cubic splines, the code is

> b=hist(baset1$taux,plot=FALSE)
> library(splines)
> regt1=glm(PROCURATIONS/INSCRITS~bs(taux,6),family=binomial,weights=INSCRITS,data=baset1)
> regt2=glm(PROCURATIONS/INSCRITS~bs(taux,6),family=binomial,weights=INSCRITS,data=baset2)
> u=seq(min(baset1$taux)+.015,max(baset1$taux)-.015,by=.001)
> ND=data.frame(taux=u)
> ug=seq(0,max(baset1$taux)+.05,by=.001)
> pt1=predict(regt1,newdata=ND,se=TRUE,type="response")
> pt2=predict(regt2,newdata=ND,se=TRUE,type="response")
> library(RColorBrewer)
> CL=brewer.pal(6, "RdBu")
> plot(ug,ug*1,col="white",xlab=variable,ylab="Taux de procuration",
+ ylim=c(0,.1))
> for(i in 1:(length(b$breaks)-1)){
+ polygon(b$breaks[i+c(0,0,1,1)],c(0,b$counts[i],b$counts[i],0)
+ /max(b$counts)*.05,col="light yellow",border=NA)}
> polygon(c(u,rev(u)),c(pt1$fit+2*pt1$se.fit,rev(pt1$fit-2*pt1$se.fit)),
+ border=NA,density=30,col=CL[4])

and we can then overlay the fitted curves, adding for comparison a standard logistic regression (the dashed lines),

> lines(u,pt1$fit,col=CL[6],lwd=2)
> polygon(c(u,rev(u)),c(pt2$fit+2*pt2$se.fit,rev(pt2$fit-2*pt2$se.fit)),
+ border=NA,density=30,col=CL[3])
> lines(u,pt2$fit,col=CL[1],lwd=2)
> regt1l=glm(PROCURATIONS/INSCRITS~taux,family=binomial,weights=INSCRITS,data=baset1)
> regt2l=glm(PROCURATIONS/INSCRITS~taux,family=binomial,weights=INSCRITS,data=baset2)
> ND=data.frame(taux=ug)
> pt1l=predict(regt1l,newdata=ND,se=TRUE,type="response")
> pt2l=predict(regt2l,newdata=ND,se=TRUE,type="response")
> lines(ug,pt1l$fit,col=CL[5],lty=2)
> lines(ug,pt2l$fit,col=CL[2],lty=2)
> legend(0,.1,c("Second Tour","Premier Tour"),col=CL[c(1,6)],
+ lwd=2,lty=1,border=NA)

Here it is (the confidence region is for the spline regression), with the first round of the presidential election in blue, and the second round in red (in France, it is a two-round system)

(the legend of the y-axis is not correct). We can also consider, as explanatory variable, the proportion of H.L.M., i.e. low-cost or council housing,
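Assuming LOCHLM counts the H.L.M. units (it is one of the components of LOGEMENT above), the corresponding graph is obtained by re-running exactly the same code after changing the variable of interest,

> variable="LOCHLM"
> reference="LOGEMENT"
> baset1$taux=baset1[,variable]/baset1[,reference]
> baset2$taux=baset2[,variable]/baset2[,reference]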

Even though I like the graph, unfortunately, the interpretation of the coefficient $\beta_1$ might be complicated

> summary(regt1l)

Call:
glm(formula = PROCURATIONS/INSCRITS ~ taux, family = binomial, 
    data = baset1, weights = INSCRITS)

Deviance Residuals: 
     Min        1Q    Median        3Q       Max  
-12.9549   -1.5722    0.0319    1.6292   13.1303  

Coefficients:
            Estimate Std. Error z value Pr(>|z|)    
(Intercept) -3.70811    0.01516  -244.6   <2e-16 ***
taux         1.49666    0.04012    37.3   <2e-16 ***
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

(Dispersion parameter for binomial family taken to be 1)

    Null deviance: 12507  on 836  degrees of freedom
Residual deviance: 11065  on 835  degrees of freedom
AIC: 15699

Number of Fisher Scoring iterations: 4

> summary(regt2l)

Call:
glm(formula = PROCURATIONS/INSCRITS ~ taux, family = binomial, 
    data = baset2, weights = INSCRITS)

Deviance Residuals: 
     Min        1Q    Median        3Q       Max  
-15.4872   -1.7817   -0.1615    1.6035   12.5596  

Coefficients:
            Estimate Std. Error z value Pr(>|z|)    
(Intercept) -3.24272    0.01230 -263.61   <2e-16 ***
taux         1.45816    0.03266   44.65   <2e-16 ***
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

(Dispersion parameter for binomial family taken to be 1)

    Null deviance: 9424.7  on 836  degrees of freedom
Residual deviance: 7362.3  on 835  degrees of freedom
AIC: 12531

Number of Fisher Scoring iterations: 4
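The coefficients are on the log-odds scale, which is what makes them hard to read directly; to get back to the probability scale, one can compare the predicted rates at the two extremes of the covariate (a quick check, using the fitted first-round model),

> plogis(coef(regt1l)[1])    # predicted proxy rate when taux=0, about 2.4%
> plogis(sum(coef(regt1l)))  # predicted proxy rate when taux=1, about 9.9%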

So we did consider a standard linear regression model, for the proxy rate, per station,

$$Y_i/n_i=\beta_0+\beta_1 x_i+\varepsilon_i$$

(again, either a model with splines, or a standard linear model). The code is

> regt1=lm(PROCURATIONS/INSCRITS~bs(taux,6),weights=INSCRITS,data=baset1)
> regt2=lm(PROCURATIONS/INSCRITS~bs(taux,6),weights=INSCRITS,data=baset2)
> u=seq(min(baset1$taux)+.015,max(baset1$taux)-.015,by=.001)
> ND=data.frame(taux=u)
> ug=seq(0,max(baset1$taux)+.05,by=.001)
> pt1=predict(regt1,newdata=ND,se=TRUE,type="response")
> pt2=predict(regt2,newdata=ND,se=TRUE,type="response")
> library(RColorBrewer)
> CL=brewer.pal(6, "RdBu")
> plot(ug,ug*1,col="white",xlab=variable,ylab="Taux de procuration",
+ ylim=c(0,.1))
> for(i in 1:(length(b$breaks)-1)){
+ polygon(b$breaks[i+c(0,0,1,1)],c(0,b$counts[i],b$counts[i],0)
+ /max(b$counts)*.05,col="light yellow",border=NA)}
> polygon(c(u,rev(u)),c(pt1$fit+2*pt1$se.fit,rev(pt1$fit-2*pt1$se.fit)),
+ border=NA,density=30,col=CL[4])
> lines(u,pt1$fit,col=CL[6],lwd=2)
> polygon(c(u,rev(u)),c(pt2$fit+2*pt2$se.fit,rev(pt2$fit-2*pt2$se.fit)),
+ border=NA,density=30,col=CL[3])
> lines(u,pt2$fit,col=CL[1],lwd=2)
> regt1l=lm(PROCURATIONS/INSCRITS~taux,weights=INSCRITS,data=baset1)
> regt2l=lm(PROCURATIONS/INSCRITS~taux,weights=INSCRITS,data=baset2)
> ND=data.frame(taux=ug)
> pt1l=predict(regt1l,newdata=ND,se=TRUE,type="response")
> pt2l=predict(regt2l,newdata=ND,se=TRUE,type="response")
> lines(ug,pt1l$fit,col=CL[5],lty=2)
> lines(ug,pt2l$fit,col=CL[2],lty=2)
> legend(0,.1,c("Second Tour","Premier Tour"),col=CL[c(1,6)],
+ lwd=2,lty=1,border=NA)

Here, again, is the evolution of the proxy-vote rate as a function of the proportion of homeowners,

The graph is rather close to the one before, and here the interpretation of the summary table is more conventional, since the slope is now directly the marginal impact of the covariate on the proxy rate,

> summary(regt1l)

Call:
lm(formula = PROCURATIONS/INSCRITS ~ taux, data = baset1, weights = INSCRITS)

Weighted Residuals:
    Min      1Q  Median      3Q     Max 
-1.9994 -0.2926  0.0011  0.3173  3.2072 

Coefficients:
            Estimate Std. Error t value Pr(>|t|)    
(Intercept) 0.021268   0.001739   12.23   <2e-16 ***
taux        0.054371   0.004812   11.30   <2e-16 ***
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Residual standard error: 0.646 on 835 degrees of freedom
Multiple R-squared:  0.1326,	Adjusted R-squared:  0.1316 
F-statistic: 127.7 on 1 and 835 DF,  p-value: < 2.2e-16

> summary(regt2l)

Call:
lm(formula = PROCURATIONS/INSCRITS ~ taux, data = baset2, weights = INSCRITS)

Weighted Residuals:
    Min      1Q  Median      3Q     Max 
-2.9029 -0.4148 -0.0338  0.4029  3.4907 

Coefficients:
            Estimate Std. Error t value Pr(>|t|)    
(Intercept) 0.033909   0.001866   18.17   <2e-16 ***
taux        0.079749   0.005165   15.44   <2e-16 ***
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Residual standard error: 0.6934 on 835 degrees of freedom
Multiple R-squared:  0.2221,	Adjusted R-squared:  0.2212 
F-statistic: 238.4 on 1 and 835 DF,  p-value: < 2.2e-16

We have used those codes to produce the graphs mentioned in the post. But before discussing the residuals of the multiple regression we considered, I wanted to share some awesome code that produces maps (I can say that those codes are awesome since Baptiste wrote most of them).

  • Visualization of Residuals on a Map of Paris

To plot the neighborhoods of the polling stations, once again, the post on Baptiste’s blog explains how the shapefile was obtained from cartelec.net

> library(maptools)
> library(rgdal)
> library(classInt)
> paris=readShapeSpatial("paris-cartelec.shp")
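(A side note: maptools and rgdal have since been retired from CRAN; on a recent installation, the shapefile can be read with the sf package instead, a sketch being

> library(sf)
> paris=st_read("paris-cartelec.shp")
> plot(st_geometry(paris))

while the code below still assumes the sp object returned by readShapeSpatial.)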

To visualize the proxy-vote rate (aggregating rounds one and two), here is the code

> elec=cbind(bt1$BV,(bt1$PROCURATIONS+bt2$PROCURATIONS),(bt1$EXPRIMES+bt2$EXPRIMES))
> colnames(elec)=c("BV","PROCURATIONS","EXPRIMES")
> elec=as.data.frame(elec)
> elec$BV=bt1$BV

To get nice colors reflecting the rates, we use

> m=match(paris$BUREAU,elec$BV)
> plotvar=100*elec$PROCURATIONS/elec$EXPRIMES
> nclr=7
> plotclr=brewer.pal(nclr,"RdYlBu")[nclr:1] 
> class=classIntervals(plotvar[m], nclr, style="fisher",dataPrecision=1)
> colcode=findColours(class, plotclr)

and finally

> par(mar=c(1,1,1,1))
> plot(paris,col=colcode,border=colcode)
> legend(656274.9, 6867308,legend=names(attr(colcode,"table")), 
+ fill=attr(colcode, "palette"), cex=1, bty="n",
+ title="Frequence procurations (%)")

If we consider a model with three explanatory variables to explain the proxy rate, namely the shares of people aged 65 and over, of homeowners, and of executives and higher professions (CS3 in the INSEE classification),

> regt1=lm(PROCURATIONS/INSCRITS~I(POP65P/POP)+
+ I(PROPRIO/LOGEMENT)+I(CS3/POP1564),weights=INSCRITS,data=baset1)

we can plot the residuals using

> m=match(paris$BUREAU,elec$BV)
> plotvar=100*residuals(regt1)
> nclr=7
> plotclr=brewer.pal(nclr,"RdYlBu")[nclr:1] 
> class=classIntervals(plotvar[m], nclr, style="fisher",dataPrecision=1)
> colcode=findColours(class, plotclr)
> par(mar=c(1,1,1,1))
> plot(paris,col=colcode,border=colcode)
> legend(656274.9, 6867308,legend=names(attr(colcode,"table")), 
+ fill=attr(colcode, "palette"), cex=1, bty="n",title="Residus")

It might not be pure random spatial noise… but we could not get better with our small set of covariates.
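To go beyond eyeballing the map, one could test the residuals for spatial autocorrelation, e.g. with Moran's I. A minimal sketch, assuming the spdep package is available and that the residuals are correctly matched to the polygons,

> library(spdep)
> nb=poly2nb(paris)
> lw=nb2listw(nb,zero.policy=TRUE)
> moran.test(plotvar[m],lw,zero.policy=TRUE,na.action=na.omit)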