Identification of ARMA processes

Last week (in the MAT8181 course), in order to identify the orders of an ARMA process, we’ve seen the eacf method, and I mentioned the scan method, introduced in Tsay and Tiao (1985). The code below – which produces the output of the scan procedure – has been adapted from old code by Steve Chen; I added a visualization of the p-values, with four colors (dark red for p-values below 1%, red between 1% and 5%, light red between 5% and 10%, and blue above 10%).

The procedure was described in the course, last Thursday.
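As a quick reminder (keeping the notation of the code below, rather than restating Tsay and Tiao (1985) in full), for each candidate pair $(m,j)$ the procedure computes the smallest squared canonical correlation $\hat\lambda(m,j)$ between two vectors of lagged observations, a correction term $d(m,j)$ based on autocorrelations, and the test statistic

$$c(m,j)=-(n-m-j)\,\log\!\left(1-\frac{\hat\lambda(m,j)}{d(m,j)}\right),$$

which is compared with a $\chi^2(1)$ distribution. Cells with a large p-value (the “O”s in the table below) are those for which an ARMA($m$,$j$) specification is not rejected.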

# SCAN procedure of Tsay and Tiao (1985), adapted from old code by Steve Chen
arma.scan=function(z,ar.max=15,ma.max=15,alpha=0.01)
{
  # ym(z,t,m) returns the vector (z[t],z[t-1],...,z[t-m])
  ym=function(z,t,m){return(z[t:(t-m)])}
  n=length(z)
  z=z - mean(z)
  cmax=ma.max + 1
  rmax=ar.max + 1
  corref=matrix(0,nrow=rmax,ncol=cmax)
  cmj.table=matrix(0,nrow=rmax,ncol=cmax)
  pv=matrix(0,nrow=rmax,ncol=cmax)
  mark=matrix(rep("X",(rmax)*(cmax)),nrow=rmax,ncol=cmax)
  Rnames=paste("AR",0:(ar.max),sep="-")
  Cnames=paste("MA",0:(ma.max),sep="-")
  rownames(corref)=Rnames
  colnames(corref)=Cnames
  rownames(cmj.table)=Rnames
  colnames(cmj.table)=Cnames
  rownames(pv)=Rnames
  colnames(pv)=Cnames
  rownames(mark)=Rnames
  colnames(mark)=Cnames
  for (m in 0:ar.max)
  {
   m1=m+1
   for (j in 0:ma.max)
   {
   j1=j+1 
    # case m=0, j>0: based on the sample autocorrelations
    if (m == 0 && j != 0)  
   {
      racf=acf(z,plot=FALSE)$acf[1:(j+1)]    
      lamb=racf[j+1]^2    
      corref[m1,j]=round(lamb,4)
      dmj=1 + 2*sum(racf[1:j]^2)
      cmj=-1*(n-m-j)*log(1.0 - lamb/dmj)
      pvalue =pchisq(cmj,1,lower.tail=FALSE)
      pv[m1,j]=round(pvalue,4)
      cmj.table[m1,j]=round(cmj,4)
      mark[m1,j]=ifelse(pvalue > alpha,"O","X")    
    } 
    else if (m != 0 && j == 0)  # case m>0, j=0: based on the sample partial autocorrelations
    {
      racf=pacf(z,plot=FALSE)$acf[1:(m+1)]
      lamb=racf[m+1]^2
      corref[m1,j1]=round(lamb,4)
      dmj = 1
      cmj=-1*(n-m-j)*log(1.0 - lamb/dmj)    
      pvalue =pchisq(cmj,1,lower.tail=FALSE)
      pv[m1,j1]=round(pvalue,4)
      cmj.table[m1,j1]=round(cmj,4)    
      mark[m1,j1]=ifelse(pvalue > alpha,"O","X")
    } 
    else
    {   # general case: based on the smallest squared canonical correlation
      mat1=matrix(0,nrow=m1,ncol=m1)
      mat2=matrix(0,nrow=m1,ncol=m1) 
      mat3=matrix(0,nrow=m1,ncol=m1)
      mat4=matrix(0,nrow=m1,ncol=m1)     
      for (t in (j+m+2):n)
      {
         tj1=t-j-1
         ym1=ym(z,tj1,m)
         ym2=ym(z,t,m)    

         mat1=mat1 + as.matrix(ym1)%*%ym1    
         mat2=mat2 + as.matrix(ym1)%*%ym2    
         mat3=mat3 + as.matrix(ym2)%*%ym2    
         mat4=mat4 + as.matrix(ym2)%*%ym1                
      }  
      b1=solve(mat1)%*%mat2
      b2=solve(mat3)%*%mat4
      A=b2%*%b1
      eig <-eigen(A)
      eig.val <-eig$values
      eig.val=Re(eig.val)
      eig.len=length(eig.val)
      eig.vector=eig$vectors
      lamb=min(eig.val)                            # smallest squared canonical correlation
      eig.vector0=eig.vector[,which.min(eig.val)]  # associated eigenvector
      eig.vector0 = eig.vector0/eig.vector0[1]     # normalized so that the first component is 1
      resid=(1:n)*0 
      for (t in (j+m+1):n)
      {
        z0=z[seq(t,t-m,-1)]      
        resid[t]=sum(z0 * eig.vector0)
      } 
      jm1=j + m + 1
      rx=Re(resid[jm1:n])
      racf=acf(rx,plot=FALSE)$acf[1:j]
      dmj=1 + 2*sum(racf^2)                       # correction term d(m,j)
      cmj=-1*(n-m-j)*log(1.0 - lamb/dmj)          # test statistic c(m,j)
      pvalue =pchisq(cmj,df=1,lower.tail=FALSE)   # compared with a chi-square(1) distribution
      corref[m1,j1]=round(lamb,4)     
      pv[m1,j1]=round(pvalue,4)
      cmj.table[m1,j1]=round(cmj,4)    
      mark[m1,j1]=ifelse(pvalue > alpha,"O","X")
    }
   } 
  } 

  cat("\n\nSCAN: Smallest CANonical Correlation Method for ARIMA(p,d,q)\n\n")
  cat("Estimates of Squared Canonical Correlation \n\n")
  print(corref)
  cat("\n\nC(m,j)\n\n")
  print(cmj.table)
  cat("\n\nChi-Square(1) Test p-value\n\n")
  print(pv)
  cat("\nSCAN Matrix \n\n")
  print(mark)

plot(0:1,0:1,col="white",xlim=c(0,nrow(pv)-1),ylim=c(0,ncol(pv)-1),axes=FALSE,xlab="AR",ylab="MA")
axis(1); axis(2)
library(RColorBrewer)
CL=brewer.pal(6, "RdBu")[c(1,2,3,5)]
cpv=matrix(as.numeric(cut(as.vector(pv),c(-1,.01,.05,.1,2))),nrow(pv),ncol(pv))
for(i in 1:nrow(pv)){
for(j in 1:ncol(pv)){
 polygon(c(i-1,i-1,i,i)-.5,c(j-1,j,j,j-1)-.5,
 col=CL[cpv[i,j]])
}}
}

Consider the following simulated time series,

> s=arima.sim(n=200,model=list(ar=c(0,0,0,.4,0,0,0,.5),ma=c(0,0,1))) 
> plot(s,type="l")

The output of the scan procedure is here

> arma.scan(s,6,6)

SCAN: Smallest CANonical Correlation Method for ARIMA(p,d,q)

Estimates of Squared Canonical Correlation 

       MA-0   MA-1   MA-2   MA-3   MA-4   MA-5   MA-6
AR-0 0.0614 0.0104 0.1862 0.3516 0.0971 0.0128 0.0000
AR-1 0.0302 0.0294 0.1501 0.0943 0.0855 0.0127 0.0385
AR-2 0.3070 0.2781 0.2140 0.0006 0.1589 0.1884 0.2243
AR-3 0.1627 0.0037 0.1927 0.2311 0.1379 0.0207 0.0376
AR-4 0.2087 0.3947 0.3653 0.3075 0.1502 0.1364 0.1013
AR-5 0.1677 0.1219 0.0110 0.0263 0.0332 0.0350 0.0044
AR-6 0.0114 0.0485 0.0561 0.0427 0.0009 0.0089 0.0308

C(m,j)

        MA-0    MA-1    MA-2    MA-3   MA-4   MA-5    MA-6
AR-0  4.1161  0.6585 12.0315 20.6512 4.5388 0.5620  0.0000
AR-1  6.1127  1.9499  9.9356  4.9145 4.7219 0.4642  1.9015
AR-2 72.6011 19.1679 14.3512  0.0337 7.9668 9.6479 11.4573
AR-3 34.9724  0.2386 10.1620 13.4082 6.7875 0.8725  1.4071
AR-4 45.8691 27.5070 19.1422 20.2835 7.3339 5.5374  3.5874
AR-5 35.7981  8.0498  0.6280  1.3543 1.8470 1.7930  0.2338
AR-6  2.2147  3.1466  3.5990  1.9904 0.0511 0.4816  1.6440

Chi-Square(1) Test p-value

       MA-0   MA-1   MA-2   MA-3   MA-4   MA-5   MA-6
AR-0 0.0425 0.4171 0.0005 0.0000 0.0331 0.4534 0.0000
AR-1 0.0134 0.1626 0.0016 0.0266 0.0298 0.4957 0.1679
AR-2 0.0000 0.0000 0.0002 0.8543 0.0048 0.0019 0.0007
AR-3 0.0000 0.6252 0.0014 0.0003 0.0092 0.3503 0.2355
AR-4 0.0000 0.0000 0.0000 0.0000 0.0068 0.0186 0.0582
AR-5 0.0000 0.0046 0.4281 0.2445 0.1741 0.1806 0.6287
AR-6 0.1367 0.0761 0.0578 0.1583 0.8212 0.4877 0.1998

SCAN Matrix 

     MA-0 MA-1 MA-2 MA-3 MA-4 MA-5 MA-6
AR-0 "O"  "O"  "X"  "X"  "O"  "O"  "X" 
AR-1 "O"  "O"  "X"  "O"  "O"  "O"  "O" 
AR-2 "X"  "X"  "X"  "O"  "X"  "X"  "X" 
AR-3 "X"  "O"  "X"  "X"  "X"  "O"  "O" 
AR-4 "X"  "X"  "X"  "X"  "X"  "O"  "O" 
AR-5 "X"  "X"  "O"  "O"  "O"  "O"  "O" 
AR-6 "O"  "O"  "O"  "O"  "O"  "O"  "O"

with the following graph

Of course, it is possible to ask for larger values,

> arma.scan(s,12,12)

The graph is now
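For comparison, the eacf method mentioned at the beginning of this post can be applied to the same series; again a sketch, assuming the TSA package (which provides an eacf function) is installed,

> library(TSA)   # assumption: the TSA package is available
> eacf(s,ar.max=12,ma.max=12)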

Voting Twice in France

On the Monkey Cage blog, Baptiste Coulmont (a.k.a. @coulmont) recently published a post entitled “You can vote twice! The many political appeals of proxy votes in France“, coauthored with Joël Gombin (a.k.a. @joelgombin) and myself. The study was initially written in French, as mentioned in a previous post. Baptiste posted additional information on his blog (http://coulmont.com/blog/…), and I also wanted to post some lines of code, to mention a model that was not used in that study (more complex to analyze, but more realistic, and with the same conclusions). The econometric study is based on aggregated votes, so there is a possible risk of ecological fallacy when interpreting the results at the individual level.

  • Regression Model: Possible Explanatory Variables

The first idea was to model proxies using a binomial regression, per polling station, $Y_i\sim\mathcal{B}(n_i,p_i)$, where $Y_i$ denotes the number of proxy votes in station $i$, and $n_i$ denotes the number of registered voters. The proportion $p_i$ can be a function of possible explanatory variables (on Baptiste’s blog there is additional information about the datasets, obtained from insee.fr and opendata.paris.fr)

> bt1=read.table("paris2007-pres-t1.csv",header=TRUE,sep=";")
> bt2=read.table("paris2007-pres-t2.csv",header=TRUE,sep=";")
> bv=read.table("paris-bv-insee-07.csv",header=TRUE,sep=";")
> bv$BV=bv$BVCOM
> baset1=merge(bt1,bv,by="BV")
> baset2=merge(bt2,bv,by="BV")
> baset1$LOGEMENT=baset1$PROPRIO+baset1$LOCNONHLM+baset1$LOCHLM+baset1$GRATUIT
> baset2$LOGEMENT=baset2$PROPRIO+baset2$LOCNONHLM+baset2$LOCHLM+baset2$GRATUIT

For instance, assume that $p_i$ is a function of the proportion of homeowners (people who own the place they live in), denoted $x_i$, in the neighborhood of the polling station,

> variable="PROPRIO"
> reference="LOGEMENT"
> baset1$taux=baset1[,variable]/baset1[,reference]
> baset2$taux=baset2[,variable]/baset2[,reference]

We can consider a logistic regression, $\text{logit}(p_i)=\beta_0+\beta_1 x_i$,

or a logistic regression with splines, $\text{logit}(p_i)=h(x_i)$ for some spline function $h$, if we do not want to assume a linear effect of the covariate.

With cubic splines, the code is

> b=hist(baset1$taux,plot=FALSE)
> library(splines)
> regt1=glm(PROCURATIONS/INSCRITS~bs(taux,6),family=binomial,weights=INSCRITS,data=baset1)
> regt2=glm(PROCURATIONS/INSCRITS~bs(taux,6),family=binomial,weights=INSCRITS,data=baset2)
> u=seq(min(baset1$taux)+.015,max(baset1$taux)-.015,by=.001)
> ND=data.frame(taux=u)
> ug=seq(0,max(baset1$taux)+.05,by=.001)
> pt1=predict(regt1,newdata=ND,se=TRUE,type="response")
> pt2=predict(regt2,newdata=ND,se=TRUE,type="response")
> library(RColorBrewer)
> CL=brewer.pal(6, "RdBu")
> plot(ug,ug*1,col="white",xlab=nom,ylab="Taux de procuration",
+ ylim=c(0,.1))
> for(i in 1:(length(b$breaks)-1)){
+ polygon(b$breaks[i+c(0,0,1,1)],c(0,b$counts[i],b$counts[i],0)
+ /max(b$counts)*.05,col="light yellow",border=NA)}
> polygon(c(u,rev(u)),c(pt1$fit+2*pt1$se.fit,rev(pt1$fit-2*pt1$se.fit)),
+ border=NA,density=30,col=CL[4])

while the rest of the code adds the fitted curves, and overlays a standard (linear) logistic regression, plotted with dashed lines,

> lines(u,pt1$fit,col=CL[6],lwd=2)
> polygon(c(u,rev(u)),c(pt2$fit+2*pt2$se.fit,rev(pt2$fit-2*pt2$se.fit)),
+ border=NA,density=30,col=CL[3])
> lines(u,pt2$fit,col=CL[1],lwd=2)
> regt1l=glm(PROCURATIONS/INSCRITS~taux,family=binomial,weights=INSCRITS,data=baset1)
> regt2l=glm(PROCURATIONS/INSCRITS~taux,family=binomial,weights=INSCRITS,data=baset2)
> ND=data.frame(taux=ug)
> pt1l=predict(regt1l,newdata=ND,se=TRUE,type="response")
> pt2l=predict(regt2l,newdata=ND,se=TRUE,type="response")
> lines(ug,pt1l$fit,col=CL[5],lty=2)
> lines(ug,pt2l$fit,col=CL[2],lty=2)
> legend(0,.1,c("Second Tour","Premier Tour"),col=CL[c(1,6)],
+ lwd=2,lty=1,border=NA)

Here it is (the confidence region is for the spline regression), with, in blue, the first round of the Presidential election, and, in red, the second round (in France, it is a two-round system)

(the legend of the y-axis is not correct). We can also consider, as explanatory variable, the rate of H.L.M. (low-cost, or council, housing),

While I like the graph, unfortunately the interpretation of the coefficients might be complicated

> summary(regt1l)

Call:
glm(formula = PROCURATIONS/INSCRITS ~ taux, family = binomial, 
    data = baset1, weights = INSCRITS)

Deviance Residuals: 
     Min        1Q    Median        3Q       Max  
-12.9549   -1.5722    0.0319    1.6292   13.1303  

Coefficients:
            Estimate Std. Error z value Pr(>|z|)    
(Intercept) -3.70811    0.01516  -244.6   <2e-16 ***
taux         1.49666    0.04012    37.3   <2e-16 ***
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

(Dispersion parameter for binomial family taken to be 1)

    Null deviance: 12507  on 836  degrees of freedom
Residual deviance: 11065  on 835  degrees of freedom
AIC: 15699

Number of Fisher Scoring iterations: 4

> summary(regt2l)

Call:
glm(formula = PROCURATIONS/INSCRITS ~ taux, family = binomial, 
    data = baset2, weights = INSCRITS)

Deviance Residuals: 
     Min        1Q    Median        3Q       Max  
-15.4872   -1.7817   -0.1615    1.6035   12.5596  

Coefficients:
            Estimate Std. Error z value Pr(>|z|)    
(Intercept) -3.24272    0.01230 -263.61   <2e-16 ***
taux         1.45816    0.03266   44.65   <2e-16 ***
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

(Dispersion parameter for binomial family taken to be 1)

    Null deviance: 9424.7  on 836  degrees of freedom
Residual deviance: 7362.3  on 835  degrees of freedom
AIC: 12531

Number of Fisher Scoring iterations: 4
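One way to make those logistic coefficients more readable (a rough sketch, not something done in the study) is to translate them into an average marginal effect on the proxy rate, since, for a logit model, the derivative of the probability with respect to the covariate is $\beta\,p(1-p)$,

> # average marginal effect of taux on the proxy rate, first round (sketch)
> p1=predict(regt1l,type="response")
> mean(coef(regt1l)["taux"]*p1*(1-p1))

With a linear model, the coefficient itself has this direct interpretation, which is one reason to consider it.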

So we did consider a standard linear regression model for the proxy rate, per station, $Y_i/n_i=\beta_0+\beta_1 x_i+\varepsilon_i$,

(again, either a model with splines, or a standard linear model). The code is

> regt1=lm(PROCURATIONS/INSCRITS~bs(taux,6),weights=INSCRITS,data=baset1)
> regt2=lm(PROCURATIONS/INSCRITS~bs(taux,6),weights=INSCRITS,data=baset2)
> u=seq(min(baset1$taux)+.015,max(baset1$taux)-.015,by=.001)
> ND=data.frame(taux=u)
> ug=seq(0,max(baset1$taux)+.05,by=.001)
> pt1=predict(regt1,newdata=ND,se=TRUE,type="response")
> pt2=predict(regt2,newdata=ND,se=TRUE,type="response")
> library(RColorBrewer)
> CL=brewer.pal(6, "RdBu")
> plot(ug,ug*1,col="white",xlab=nom,ylab="Taux de procuration",
+ ylim=c(0,.1))
> for(i in 1:(length(b$breaks)-1)){
+ polygon(b$breaks[i+c(0,0,1,1)],c(0,b$counts[i],b$counts[i],0)
+ /max(b$counts)*.05,col="light yellow",border=NA)}
> polygon(c(u,rev(u)),c(pt1$fit+2*pt1$se.fit,rev(pt1$fit-2*pt1$se.fit)),
+ border=NA,density=30,col=CL[4])
> lines(u,pt1$fit,col=CL[6],lwd=2)
> polygon(c(u,rev(u)),c(pt2$fit+2*pt2$se.fit,rev(pt2$fit-2*pt2$se.fit)),
+ border=NA,density=30,col=CL[3])
> lines(u,pt2$fit,col=CL[1],lwd=2)
> regt1l=lm(PROCURATIONS/INSCRITS~taux,weights=INSCRITS,data=baset1)
> regt2l=lm(PROCURATIONS/INSCRITS~taux,weights=INSCRITS,data=baset2)
> ND=data.frame(taux=ug)
> pt1l=predict(regt1l,newdata=ND,se=TRUE,type="response")
> pt2l=predict(regt2l,newdata=ND,se=TRUE,type="response")
> lines(ug,pt1l$fit,col=CL[5],lty=2)
> lines(ug,pt2l$fit,col=CL[2],lty=2)
> legend(0,.1,c("Second Tour","Premier Tour"),col=CL[c(1,6)],
+ lwd=2,lty=1,border=NA)

Here, again, is the evolution as a function of the rate of homeowners,

The graph is rather close to the one before, and here, the interpretation of the summary table is more conventional,

> summary(regt1l)

Call:
lm(formula = PROCURATIONS/INSCRITS ~ taux, data = baset1, weights = INSCRITS)

Weighted Residuals:
    Min      1Q  Median      3Q     Max 
-1.9994 -0.2926  0.0011  0.3173  3.2072 

Coefficients:
            Estimate Std. Error t value Pr(>|t|)    
(Intercept) 0.021268   0.001739   12.23   <2e-16 ***
taux        0.054371   0.004812   11.30   <2e-16 ***
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Residual standard error: 0.646 on 835 degrees of freedom
Multiple R-squared:  0.1326,	Adjusted R-squared:  0.1316 
F-statistic: 127.7 on 1 and 835 DF,  p-value: < 2.2e-16

> summary(regt2l)

Call:
lm(formula = PROCURATIONS/INSCRITS ~ taux, data = baset2, weights = INSCRITS)

Weighted Residuals:
    Min      1Q  Median      3Q     Max 
-2.9029 -0.4148 -0.0338  0.4029  3.4907 

Coefficients:
            Estimate Std. Error t value Pr(>|t|)    
(Intercept) 0.033909   0.001866   18.17   <2e-16 ***
taux        0.079749   0.005165   15.44   <2e-16 ***
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Residual standard error: 0.6934 on 835 degrees of freedom
Multiple R-squared:  0.2221,	Adjusted R-squared:  0.2212 
F-statistic: 238.4 on 1 and 835 DF,  p-value: < 2.2e-16
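To compare the two rounds more formally, one possibility (a sketch, under the assumption that the two datasets contain the same polling stations and columns; this was not done in the post) is to stack them and test for an interaction between the covariate and the round,

> basetall=rbind(data.frame(baset1[,c("PROCURATIONS","INSCRITS","taux")],tour="T1"),
+ data.frame(baset2[,c("PROCURATIONS","INSCRITS","taux")],tour="T2"))
> regall=lm(PROCURATIONS/INSCRITS~taux*tour,weights=INSCRITS,data=basetall)
> summary(regall)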

We have used those codes to produce the graphs mentioned in the post. But before discussing the residuals of the multiple regression model we considered, I wanted to share some awesome code that produces maps (I can say that the code is awesome since Baptiste wrote most of it).

  • Visualization of Residuals on a Map of Paris

To plot the neighborhoods of the polling stations, once again, the post on Baptiste’s blog explains how the shapefile was obtained from cartelec.net

> library(maptools)
> library(rgdal)
> library(classInt)
> paris=readShapeSpatial("paris-cartelec.shp")

To visualize the proxy rate (aggregated over the two rounds), here is the code

> elec=data.frame()
> elec=cbind(bt1$BV,(bt1$PROCURATIONS+bt2$PROCURATIONS),(bt1$EXPRIMES+bt2$EXPRIMES))
> colnames(elec)=c("BV","PROCURATIONS","EXPRIMES")
> elec=as.data.frame(elec)
> elec$BV=bt1$BV

To get nice colors, as a function of the rates, we use

> m=match(paris$BUREAU,elec$BV)
> plotvar=100*elec$PROCURATIONS/elec$EXPRIMES
> nclr=7
> plotclr=brewer.pal(nclr,"RdYlBu")[nclr:1] 
> class=classIntervals(plotvar[m], nclr, style="fisher",dataPrecision=1)
> colcode=findColours(class, plotclr)

and finally

> par(mar=c(1,1,1,1))
> plot(paris,col=colcode,border=colcode)
> legend(656274.9, 6867308,legend=names(attr(colcode,"table")), 
+ fill=attr(colcode, "palette"), cex=1, bty="n",
+ title="Frequence procurations (%)")

If we consider a model with three explanatory variables, to explain the proxy rate,

> regt1=lm(PROCURATIONS/INSCRITS~I(POP65P/POP)+
+ I(PROPRIO/LOGEMENT)+I(CS3/POP1564),weights=INSCRITS,data=baset1)

we can plot the residuals using

> m=match(paris$BUREAU,elec$BV)
> plotvar=100*residuals(regt1)
> nclr=7
> plotclr=brewer.pal(nclr,"RdYlBu")[nclr:1] 
> class=classIntervals(plotvar[m], nclr, style="fisher",dataPrecision=1)
> colcode=findColours(class, plotclr)
> par(mar=c(1,1,1,1))
> plot(paris,col=colcode,border=colcode)
> legend(656274.9, 6867308,legend=names(attr(colcode,"table")), 
+ fill=attr(colcode, "palette"), cex=1, bty="n",title="Residus")

It might not be pure spatial random noise… but we could not do better with our small set of covariates.
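To go one step further than visual inspection, one could test for spatial autocorrelation of those residuals, for instance with Moran’s I; this is only a sketch, assuming the spdep package is available and that the match index m used above correctly aligns residuals and polygons,

> library(spdep)   # assumption: the spdep package is available
> nb=poly2nb(paris)                  # contiguity neighbours of the polygons
> lw=nb2listw(nb,zero.policy=TRUE)   # spatial weights
> moran.test(plotvar[m],lw,zero.policy=TRUE,na.action=na.omit)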

Bivariate Densities with N(0,1) Margins

This Monday, in the ACT8595 course, we came back to elliptical distributions and conditional independence (here is an old post on de Finetti’s theorem, and the extension to Hewitt-Savage’s). I have shown simulations to illustrate those two concepts of dependent variables, but I wanted to spend some time visualizing densities. More specifically, what could the joint density be if we assume that the margins are $\mathcal{N}(0,1)$ distributions?

  • The Bivariate Gaussian distribution

Here, we consider a Gaussian random vector, with $\mathcal{N}(0,1)$ margins, and with correlation $r$ (set to $0.5$ in the code below). This is the standard graph, with elliptical isodensity curves

r=.5
library(mnormt)
S=matrix(c(1,r,r,1),2,2)
f=function(x,y) dmnorm(cbind(x,y),varcov=S)
vx=seq(-3,3,length=201)
vy=seq(-3,3,length=201)
z=outer(vx,vy,f)
set.seed(1)
X=rmnorm(1500,varcov=S)
xhist <- hist(X[,1], plot=FALSE)
yhist <- hist(X[,2], plot=FALSE)
top <- max(c(xhist$density, yhist$density,dnorm(0)))
nf <- layout(matrix(c(2,0,1,3),2,2,byrow=TRUE), c(3,1), c(1,3), TRUE)
par(mar=c(3,3,1,1))
image(vx,vy,z,col=rev(heat.colors(101)))
contour(vx,vy,z,col="blue",add=TRUE)
points(X,cex=.2)
par(mar=c(0,3,1,1))
barplot(xhist$density, axes=FALSE, ylim=c(0, top), space=0,col="light green")
lines((density(X[,1])$x-xhist$breaks[1])/diff(xhist$breaks)[1],
dnorm(density(X[,1])$x),col="red")
par(mar=c(3,0,1,1))
barplot(yhist$density, axes=FALSE, xlim=c(0, top), space=0, 
horiz=TRUE,col="light green")
lines(dnorm(density(X[,2])$x),(density(X[,2])$x-yhist$breaks[1])/
diff(yhist$breaks)[1],col="red")

That was the simple part.

  • The Bivariate Student-t distribution

Consider now another elliptical distribution. But we want here to normalize the margins. Thus, instead of a pair $(X_1,X_2)$ with a bivariate Student-t distribution, we consider the pair $(\Phi^{-1}(T_k(X_1)),\Phi^{-1}(T_k(X_2)))$, so that the marginal distributions are $\mathcal{N}(0,1)$. The new density is obtained easily, since the transformation is a one-to-one increasing transformation.
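Indeed, writing $G(x)=\Phi^{-1}(T_k(x))$, where $T_k$ denotes the cdf of the Student-t distribution with $k$ degrees of freedom and $\Phi$ the standard Gaussian cdf (this is the qnorm(pt(.)) transformation used in the code below), a standard change of variables gives the density of the transformed pair,

$$f_{(Y_1,Y_2)}(y_1,y_2)=\frac{f_{(X_1,X_2)}\big(G^{-1}(y_1),G^{-1}(y_2)\big)}{G'\big(G^{-1}(y_1)\big)\,G'\big(G^{-1}(y_2)\big)},$$

where $f_{(X_1,X_2)}$ is the bivariate Student-t density.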

k=3
r=.5
G=function(x) qnorm(pt(x,df=k))       # maps a t(k) variable to a N(0,1) one
dg=function(x) dt(x,df=k)/dnorm(qnorm(pt(x,df=k)))   # derivative G'(x)
Ginv=function(x) qt(pnorm(x),df=k)    # inverse transformation
S=matrix(c(1,r,r,1),2,2)
f=function(x,y) dmt(cbind(Ginv(x),Ginv(y)),S=S,df=k)/(dg(Ginv(x))*dg(Ginv(y)))
vx=seq(-3,3,length=201)
vy=seq(-3,3,length=201)
z=outer(vx,vy,f)
set.seed(1)
Z=rmt(1500,S=S,df=k)
X=G(Z)
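The level curves and the simulated sample can then be visualized by reusing the plotting block from the Gaussian case above, for instance

# same central panel as in the Gaussian case (density, contours, simulated sample)
par(mar=c(3,3,1,1))
image(vx,vy,z,col=rev(heat.colors(101)))
contour(vx,vy,z,col="blue",add=TRUE)
points(X,cex=.2)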

Because we considered a nonlinear transformation of the margins, the level curves are no longer elliptical. But there is still some kind of symmetry.

  • The Exchangeable Case with Conditionally Independent Random Variables

We did consider the case where $X_1$ and $X_2$ are independent random variables, given $\Theta$, both being exponentially distributed, with parameter $\Theta$. As we’ve seen in class, it might be difficult to visualize such a sample, unless we use log scales on both axes. But instead of a log transformation, why not consider a transformation such that the margins are $\mathcal{N}(0,1)$? The only technical problem is that we do not have the (unconditional) distributions of the margins. Well, we have them, but they are integral based. From a computational point of view, that’s not a big deal… Computations might take a while, but we can visualize the density using the following code (here, we assume that $\Theta$ is Gamma distributed)
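To be explicit about the integrals involved (with notation matching the code below, where $\pi$ denotes the Gamma$(a,b)$ density of $\Theta$), the unconditional marginal cdf and the unconditional joint density are

$$F(x)=\int_0^\infty\left(1-e^{-\theta x}\right)\pi(\theta)\,d\theta
\qquad\text{and}\qquad
h(x_1,x_2)=\int_0^\infty \theta^2 e^{-\theta(x_1+x_2)}\,\pi(\theta)\,d\theta,$$

so that the transformation is $G(x)=\Phi^{-1}(F(x))$, and the density of the pair with $\mathcal{N}(0,1)$ margins is $h(G^{-1}(x),G^{-1}(y))\,(G^{-1})'(x)\,(G^{-1})'(y)$, which is what the functions G, H and f below compute (the derivative of $G^{-1}$ being approximated numerically).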

a=.6
b=1
h=.0001
# G maps the exponential-mixture variable to a N(0,1) one (Phi^{-1} of its cdf)
G=function(x) qnorm(ifelse(x<0,0,integrate(function(z) pexp(x,z)*
dgamma(z,a,b),lower=0,upper=Inf)$value))
# numerical inverse of G, and numerical derivative of that inverse
Ginv=function(x) uniroot(function(z) G(z)-x,lower=-40,upper=1e5)$root
dg=function(x) (Ginv(x+h)-Ginv(x-h))/2/h
# unconditional joint density of the exponential pair
H=function(xy) integrate(function(z) dexp(xy[2],z)*dexp(xy[1],z)*
dgamma(z,a,b),lower=0,upper=Inf)$value
# density of the pair with N(0,1) margins (change of variables)
f=function(x,y) H(c(Ginv(x),Ginv(y)))*(dg(x)*dg(y))
vx=seq(-3,3,length=151)
vy=seq(-3,3,length=151)
z=matrix(NA,length(vx),length(vy))
for(i in 1:length(vx)){
for(j in 1:length(vy)){
z[i,j]=f(vx[i],vy[j])}}
set.seed(1)
Theta=rgamma(1500,a,b)
Z=cbind(rexp(1500,Theta),rexp(1500,Theta))
X=cbind(Vectorize(G)(Z[,1]),Vectorize(G)(Z[,2]))
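A quick sanity check (not in the original post): the transformed margins should be close to standard Gaussian,

# the transformed margins should be (approximately) N(0,1)
qqnorm(X[,1]); qqline(X[,1])
qqnorm(X[,2]); qqline(X[,2])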

There is a small technical problem, but no big deal.

Here, the joint distribution is quite different. The margins are – one more time – standard Gaussian, but the shape of the joint density is now clearly asymmetric, from the lower (left) tail to the upper (right) tail. More details will come when we introduce copulas. The only difference will be that the margins will be uniform on the unit interval, and not standard Gaussian.

Academic Blogging, a Personal Experience

I wanted to get back to two posts (one on Academic Blogging, and one on Twitter for Academics), and to update them based on the discussion that followed the panel (on Thanksgiving), as well as some more recent discussions. All comments are welcome! [the post is quite long, a pdf version is available from papers.ssrn.com/2398377]

  • Introduction

“To talk in public, to think in solitude, to read and to hear, to inquire and answer inquiries, is the business of a scholar” (from Johnson (1759)). 250 years later, one might think that the business has not changed: academics remain in their ivory tower, but from time to time, they have to leave it, to communicate. More recently, Graham (2004) characterized three channels for scholarly communication: publication in peer-reviewed journals, conferences and seminars, and a more informal one, that might be called the “new invisible college”, as in Halavais (2006), or related to a “faculty lounge”, using the expression of Priem, Piwowar, & Hemminger (2012). This third channel is precisely the one related to blogs.

With the Internet (emails, blogs, forums, etc.), academics now have new media to communicate, either within their own community (and to launch participative projects), or outside it. Academic blogs are one medium, among many others. As explained in Gregg (2006), “blogs have made scholarly work accessible and accountable to a readership outside the academy”. From an insider’s perspective, blogs seem to be extremely popular, because bloggers are active, sharing links, comments, discussions, etc. In 2007, George Siemens was already enthusiastic: “it’s great to see research-focused academics entering the blog space” (see Siemens (2007)). At that time, it was clearly not as popular as it would become seven years later.

Social media can be divided into eight categories, according to Gu & Widén-Wulff (2010) and Nicolas & Rowlands (2011): blogs, microblogs, RSS, wikis, tagging, social networks, media sharing, and online documents. There is also an emerging form of participatory media, with question-and-answer services such as Quora and Stackoverflow, but it won’t be mentioned here (some researchers are extremely active there, but it is very difficult to quantify). In this article, we will look back on more than five years of experience with a scholarly blog. In the first section, we will get back to definitions, to the context, and to the origins of the freakonometrics blog, which started as one of the few official blogs of researchers at Université de Rennes 1, in France. In the second section, we will study the interplay between a blog and other activities within academia. In the third section, there will be a short discussion about microblogging, and the use of Twitter by academics. And finally, the last section will try to explain why, even after almost six years of experience, an academic blogger might still be enthusiastic.

  • The Origins and the Context of the Blog

From a technical point of view, a blog is a contracted form of weblog, a website made up of ongoing entries, which we will call posts (but which might also be called articles). Those posts are published in reverse chronological order: new material is added to the top of the page, while older material is automatically archived. This makes blogs difficult to follow for infrequent readers. There might be tags and categories, which can be used to distinguish posts related to conferences, publications, and teaching, or to flag dedicated keywords.

  • Hosting Platforms and Identity of the Blog

The first blog was launched because the Université de Rennes 1 (I had just been recruited there as a professor, not yet tenured) decided in 2007 to have its own blog platform for its researchers. It was officially Arthur Charpentier’s blog. On this platform, blogs were not anonymous; they were based on the credibility of the researcher, and the name of the university was on the front page, almost as large as the name of the researcher.

Being hosted by the university is a dangerous practice, since it is not clear what is allowed, and what is not. For instance, during the first year, there were a lot of discussions about the status of researchers, in France. Not being tenured put me in a delicate situation. Even if the blog became well referenced on search engines very fast, it was not very popular at that time, except perhaps among students (mine, but also students in other programs). Students understood the interest of the blog, as a place to discuss and to interact. The blog became an extension of the class: after the formal lecture, in the room, the blog became a place to share additional documents, datasets, computer codes, etc. After two or three years, the blog started to be popular within my own community (in economics, econometrics and applied mathematics), not only among students. I started to be recognized at conferences, and I wanted to stop having an eponymous blog (coincidence, or not, it was also around the time I moved to Montréal). Having an eponymous academic blog is a common practice, which makes sense since most bloggers tend to identify with their blogs, as both personal and professional. All the more within academia, where boundaries between professional, academic and personal life may be difficult to establish, mainly because all those aspects are constantly intertwined in the life of scholars.

Again, having a blog hosted by the university might yield delicate situations. Those institutions would like to use blogs as communication tools, and might not appreciate it when the blog is used to publish satire of one’s own department. After a few years, I wanted to transfer the blog somewhere else. Using the term introduced by Charlotte Frost, and used in Mewburn & Thomson (2013), I wanted my blog to remain some sort of academic blog, but an “outstitutional” one, more than an institutional one. I decided to host the blog on my personal internet provider’s platform. After almost three years at Université de Rennes 1, the freakonometrics adventure officially started.

With a personal blog, it became necessary to claim that the blog was some sort of academic blog. Marsh (2013) mentions that “many people arrive and depart my blog without knowing I’m an academic. I don’t hide the fact. It’s there”. The name of the researcher appears on the “About” page, or in the resume, which is available on the front page. There is the blogger, freakonometrics, and the scholar, Arthur Charpentier. Those two identities overlap, considerably, without being entirely congruent. When one reaches an academic webpage, there is usually no surprise: we expect to find information about the academic background, a list of publications, some information about trained students, lecture notes. But every academic blogger has their own identity, which is a reflection of its author. Some bloggers like to post pictures taken from places they visit. Others talk about their family, or articles they recently read. Because blogs might have diversified contents, they tend to attract very different audiences (we will get back to that point in the next section). I do not hide myself behind a pseudonym (even if I use one), and I still mention my academic background in order to give my blog more credibility. Noel (2013) claims that “a blog does not indicate expertise”. Traditionally, it is true that a stack of peer-reviewed work automatically indicates some expertise. But it might be time to accept that a well-read blog, with valuable contributions in some field, can indicate some expertise too.

Several platforms were hosting blogs at that time. Some interesting blogs were hosted by scientific journals (such as The American Scientist or Nature) or societies (in Economics, or Statistics). But those blogs are somehow institutional. Even if my area of expertise is on the border of social sciences, I decided to migrate to the hypotheses.org platform, a “platform for academic blogs in the humanities and social sciences”. This platform has its own scientific committees, and provides technical support (not to mention the legal one).

  • Influences and the Blogging Community

Freakonometrics became a blog dedicated to econometrics, economics and applied mathematics. As explained in Quiggin (2011a), “with the arguable exception of law, economics is the academic discipline where blogging has been embraced most enthusiastically”. This might explain why there is such an active – and enthusiastic – community (see also Quiggin (2011b) for a discussion). On the other hand, Halford (2012) said that “the situation within academic blogging seems to be that we are currently a bunch of islands that are vaguely connected but not really arranged into continents and groups. We are all spread out across the digital world with a fragmented network between us”.

So, what can we find in this community? Some researchers like to use their name, like Greg Mankiw’s blog for instance (with subtitle “random observations for students of economics“). But most of them prefer to hide themselves behind a short title, like “Confessions of a Supply-Side Liberal” (by Miles Kimball), “The Conscience of a Liberal” (by Paul Krugman), or longer one, like “Statistical Modeling, Causal Inference, and Social Science” (by Andrew Gelman). But hide is probably too strong since the editor is never hidden: we usually find a short bio, including a picture (most of the time), as well as a link to a webpage hosted by some university. Other examples might be “I’m a bandit“, with subtitle “random topics on optimization, probability and statistics. by Sébastien Bubeck“, or “what’s new“,  with subtitle “updates on my research and expository papers, discussion of open problems, and other maths-related topics. by Terence Tao“. Some blogs use puns (it is a feature that one can find on almost any blog: most of them use humour, just to explain that this is just a blog) like “Hyndsight“, “a blog by Rob Hyndman“.

Most of those popular blogs are blogs of well-known academics. Greg Mankiw is not only a popular blogger, he is also the author of a standard undergraduate textbook in macroeconomics, and so is John Cochrane, editor of the “Grumpy Economist” blog, and author of a standard graduate textbook in financial economics. There are many other well-known academics willing to reach new audiences. But John Quiggin claims that his own blog is much more popular than his research (on Australian policy issues), as does Tyler Cowen (one of the contributors to the “Marginal Revolution” blog).

Another category might be blogs with several contributors, such as the “Monkey Cage” blog, dedicated to political science research. This blog contains contributions from several researchers, such as John Sides, Erik Voeten, Andrew Gelman, Joshua Tucker and Henry Farrell. But all those researchers have their own blogs besides.

To get back to my personal experience, the name freakonometrics was chosen for two reasons. The first one is that it was a common word among the community of researchers in the Economics department at Ecole Polytechnique. Freakonometrics meant using econometric techniques to answer real-life questions, as opposed to Mathematical Econometrics, as it was taught to students. Since this was exactly the aim of the blog (writing posts about simple questions, with a quantitative approach), I liked the idea of using it as the blog’s name. The second reason is related to a major influence for most bloggers in Economics. In 2005, University of Chicago economist Steven Levitt and New York Times journalist Stephen Dubner published a collection of “economic” articles, claiming that economics is, at root, the study of incentives. This is how freakonomics.com/blog/ started (the first post was published in September 2005). My blog was more about Econometrics than Economics, so I decided to use the freakonometrics name. But this was not a very good idea: while all bloggers know freakonomics, and have no doubt that the two blogs are unrelated, outside this community there is confusion (as I noticed many times while discussing with journalists, for instance).

  • The Structure of the Blog, and the Contents

A blog is usually seen as chaotic, and unstructured. In contrast, academia, and Science, are places where everything is supposed to be well ordered. Most undergraduate students cannot understand when a professor answers “I have no idea what might happen if we relax that assumption”. An open question is a great source of inspiration for a blogger: it might be the start of a bibliographic survey, or of quick simulations. Thus, the blog “offers a unique space to simultaneously achieve efforts related to research, teaching, and service”, as mentioned in Grollman (2014). Technically, as recalled in the introduction, a blog is just a series of entries called “posts”, even if, with several thousand words, some posts might also be called “articles”, as suggested in Cohen (2006).

Those posts might start a discussion after an open question in class, mention an interesting paper recently discovered, point out an interesting conference, provide some code to generate a graph, upload datasets used in an article about to be published, criticize an article read in a newspaper, or just share an experience. Therefore, “academic blog” is a generic term that includes several different genres, from blogs about academic life to pure research blogs (see Walker (2006) for a discussion), even if most of them are hybrid genres. As analyzed in Luzón (2006), making personal research available is a simple way not only to get feedback, but also to increase visibility, to develop “respect and reputation” (as discussed in Gregg (2009)). As claimed by the committee of hypotheses.org, blogs should be seen as lab books, not as a substitute for academic journals (a post is not an article, as we will see in the next section) but more as a catalogue of personal thoughts (and possibly some findings).

  • Academic Life and Blogging Activities

Traditionally, according to Merton (1942), academia was governed by norms of universalism, communism, disinterestedness, and organized scepticism. More recently, Ziman (1996) claims, on the contrary, that academia is characterized by inverse norms: proprietary, local, authoritarian, commissioned and expert. Blogging is probably an answer to this shift. In this section, we will get back to the interactions of the blogging activity with more conventional academic duties.

  • Academic Hierarchy and Rules

Walker (2006) explains that “blogging allowed us to circumvent the power structure of academia”. Blogs are somewhere between the conversational style of the conference, and the written style of articles. But blogs are clearly outside those communities; blogs are much more accessible: you do not have to attend a conference, you don’t even have to sign up, and you are usually allowed to participate in the discussion without belonging to the community (you would have to, if you wanted to publish a response to a published article). Furthermore, the bureaucratisation of the universities has had “profound effects on the writing of academics” (as discussed in Brett (1991)). More specifically, it is nearly impossible for academics to provide a “public intellectual function” nowadays because it goes “against the grain of the job”. Blogs can be seen as an attempt by academics to regain some freedom of speech. As mentioned in Gordon (1998), tenured positions were initially created precisely to protect academics from pressures, to grant them a strong freedom of speech. But in the 2010s, tenured positions are used to pressure those who have only temporary contracts.

  • Blog Posts and Academic Articles

A blog entry is usually published without any pressure, with no one to impress, just the idea of working through some ideas and concepts, together, the writer and the reader. Academic articles go through the peer review process, but it is only a peer-to-peer discussion. All those discussions are usually skipped in the final version, and there is no real place for a discussion, except for some controversial papers, where a discussion can be published. As mentioned in Chatterjee & Biswas (2011), some journals now have forums, but otherwise there is no place to converse about a published paper. Blogs can be seen as tools for “post-publication review of papers”. This is actually what happened in 2010, when Science published a paper (Sebastiani & al. (2010)) which was then criticized by several science blogs – about methodological fallacies – before being retracted (see Carmichael (2010) and MacArthur (2011) for a full discussion).

Blog posts offer a space not only to summarize a research paper (written in a codified and strict format), but also to discuss the process and the context of the study. As Gregg (2009) suggests, blogs provide a space “distinct from the parent culture of institutions”. A paper might start with a discussion with a colleague, then additional contributors might join to provide additional expertise. The paper is then written in a codified format, to please potential reviewers. Those reviewers might ask for substantial changes, which are done because some co-authors really need a publication to apply for a position. Finally, the published paper might have no connection at all with the initial discussion. Even if bloggers do not want to share with complete strangers the hidden side of a paper, it might be interesting to emphasize dead-ends, attempts that failed, and how the paper started.

  • An Academic Blogger is not a (Science) Journalist

Blogging can revamp the relationship between science and society, simply by making research more comprehensible to others, just like journalists do. There have always been connections between the two communities: academics, who can understand what exactly is claimed in a research study, and science journalists, who can summarize and explain with less jargon. “Historically, academics have been put in a reactive position, responding to questions from reporters. Blogging places academics in a more proactive position, intervening more effectively in popular debates around the topics they research”, as claimed by Jenkins (2008).

Actually, some academics do publish posts in blogs hosted by newspapers, such as “The Conscience of a Liberal” (by Paul Krugman). Somehow, those journals (here the New York Times) host academics the same way newspapers hosted opinion pages, where academics were invited to give their point of view, a few years back. But, as Quiggin (2011) explains, “newspapers are generally reluctant to report on academic working papers and similar publications unless the conclusions are obviously newsworthy”. Economics in newspapers has to be related to macroeconomics, and science has to be related to medicine or technology (the science page is now a nice advertising page for the most recent smartphone applications). And, as mentioned by Cottrell (2013), a couple of decades ago, experts (not to say scholars) “have functioned as sources for newspaper journalists. Their opinions would emerge often mangled and simplified, always truncated, in articles over which they had no final control.” Now, with blogs, it is possible to read them directly, in a style that is easier to understand, compared with (standard) academic publications. “The general reader has access to expertise that was easily available, a decade ago, only to the insider or the specialist”. Writing a post in an academic blog is neither pretending to be a journalist, nor writing an academic article. In the traditional process of research, we discuss with colleagues, possibly in conferences, but only the final publication remains. False starts and heuristics are skipped, because they might appear unnecessary to the understanding of the article. And this is exactly where blogging becomes interesting. In newspapers, contributors are required to avoid mathematical equations, and scientific jargon. Blogs are exactly the missing link between a technical research article, “jargonistic and turgid” (as Mewburn & Thomson (2013) wrote), and an article in a newspaper, usually overselling minor contributions.

  • The Narrative Style of a Blog Post

There is a stark contrast between the story told in a peer-reviewed paper, and the true, untold, research process. The messy side of a research project, and all the dead-ends, might be mentioned, maybe as a quick anecdote, in a conference talk. But for junior researchers, it is not the place to discuss the true story (maybe only the shiny side of it). As mentioned in Conole (2007), “the blogosphere is offering an alternative style of academic discourse”. A blog post is a place for snippets of the work, more informal, with a completely different style. As claimed by Pierre Mounier and Marin Dacos (from hypotheses.org), the blog is between the oral and the written form.

Nevertheless, the tone, or the style of writing, differs from one blog to another. Posts can be written as reportage, with a journalistic style (where various sources of information are brought together and synthesized into a story), as a formal or more informal essay (where a story is written, with opinions); they can be more pedagogical (with a lot of questions) or satirical (see Mewburn & Thomson (2013) for a detailed comparison, with further examples).

But the time when Carl Sagan was criticized (see e.g. Emanuel (1986)) for mentioning a study in Parade (the American nationwide Sunday newspaper magazine) before publishing it in Science is now far away. As written in Oreskes & Conway (2011), this had been considered a “violation of scientific norms”. Similarly, because of its style, some researchers cannot consider a blog post as a serious contribution.

  • Blogging and Microblogging (Twitter)

Blogs are not the only place where academics are now visible: academics are also extremely active on microblogging platforms, such as Twitter. Twitter is different from blogs, not only because of the length of the “posts”, but also because it is extremely codified (while blogs are clearly not). If academics can use blogs to educate, they can use Twitter to inform, to make interesting material available to the public.

  • From Official Rules to Unofficial Uses

Microblogging, on Twitter, is simple. One has to create an account, on twitter.com, and 160 characters can be used for a short bio, with the possible use of an avatar. It is possible to be active, by tweeting (posting online messages with fewer than 140 characters), or passive (and then simply follow some discussions). One can use the search window, and then all the tweets containing a given word (or sentence) will appear. It is also possible to follow some hashtags (following the # symbol) such as #overlyhonestmethods, or some people (following the @ symbol) such as @freakonometrics. When following that account, all the tweets (posted under the name @freakonometrics) will appear in the so-called TL (or timeline). I will then have a follower (and of course, I might follow back). One can tweet not only text, but also pictures and html links (which will be automatically shortened). Those are the official rules of Twitter.

There are also codes and uses. With a Twitter account, it is possible to write our own tweets, but also to retweet a tweet, which means forwarding another user’s tweet to all of our followers. One can use the retweet button, or use the old-fashioned RT (for re-tweet) or MT (for modified tweet) if it was necessary to shorten a tweet. As in academia, it is important to mention the source of the information when sharing it with followers (it is called a mention, and it is like using a reference in a research paper). Another way of acknowledging the account that originally shared the content being tweeted is to HT the account (hat tip). It is also possible to discuss with other people. One can reply to a tweet using the reply button (when replying to one of my tweets, the tweet will start with @freakonometrics, and only followers also following me will see it in their tweet list). It is also possible to poke another account to make sure that someone reads your tweet. It is possible to favorite a tweet, by clicking the yellow star next to the message. If a lot of the people I follow favorite a tweet, it will be more likely to appear in the discover window, even if I do not follow that account. Because of the 140-character constraint, a lot of strange words can be used on Twitter, such as “OH”, which most often means “overheard”. Twitter also has its own habits, such as the #FF (follow Friday) tradition: on Friday, it is common to share the usernames of our favorite twitterers, the accounts we find interesting.

  • Are Academics on Twitter?

Only “one in forty scholars are active on Twitter”, as estimated in Priem & al. (2012). And academics are mainly skeptical about Twitter. For most of them, Twitter is either for their kids, or perhaps for graduate students (if they have some idea about what Twitter is), but not for researchers. To get back to my personal experience, I follow (and am followed by) a lot of graduate students, PhDs or postdocs, and a few more senior researchers. Most of those senior, prominent scientists share extremely interesting information! But Twitter is time consuming (as are blogs), and not everyone is willing to have an account on Twitter (and to be active, by sharing links, or by interacting, more generally).

If not everyone is on Twitter, important people are, at least in Economics. In January, the annual conference of the American Economic Association was organized in Philadelphia. This is where the job market for PhD students in Economics takes place. It was extremely active on Twitter (updates were obtained by frequently following the #ASSA2014 hashtag). From my perspective (maybe also because of the people I follow), it looked like everyone there was on Twitter during that (major) event.

  • Twitter as a Bookmark

Like most of my colleagues, I do spend a lot of time online, reading articles, for work, for fun, and sometimes, it can be interesting to keep track of those articles, posts on blogs, and articles on arxiv.org or papers.ssrn.com. The first reason I use Twitter is that I need bookmarks. With Twitter, those bookmarks are public. It does not mean that I endorse what I read; it should be understood as: I found that interesting, and I want to bookmark it, to find it again, someday. As most academics, I do read a lot. And one of our duties is to share information. This was mentioned in Priem & Costello (2010): “the professional impact of Twitter may be particularly pronounced for scholars given that sharing information is a central component of their work” (see also Letierce & al. (2010)). They estimated that 30% of the tweets sent by academics contain a hyperlink to a peer-reviewed resource (usually a pdf file of a research paper). And it is not necessarily a paper published online 24 hours ago: it can also be a paper rediscovered accidentally, or a technical report written a few decades ago that has just been scanned.

Traditionally, academic visibility is measured using citations, meaning that some work has been accepted by (so-called) peers, in the scientific community. Academics need to write to have an impact. But a lot of time is spent on reading, and this reading activity is missed by standard citation counts (unless you publish a review, for instance). On Twitter, you can comment, even (briefly) discuss a publication, some tricks in computer codes, share graphical visualizations, etc. It is not like posting an anonymous comment on some scientific blog (actually, several blogs do not have open comments any more). On Twitter, there is some kind of credibility, and not only from the academic resume: people know you and follow you. According to Wagers (2012), “good content is propagated rapidly, bad content is not”. Of course, it is not that simple.

  • Live-Tweet in Conferences

Another popular use among academics is live-tweeting. But as mentioned on a lot of blogs, one should be careful about live-tweeting: it is supposed to be fun, but one should stay polite and respectful, and use quotation marks as much as possible. Getting back to the so-called Twittergate (see McMillan Cottom (2012) for a complete summary), Aaron Bady used an interesting image about Twitter within the academic community: “I conjured up the image of an appropriate cantankerous old professor yelling at a bunch of punk tweeters to get off his lawn, like Clint Eastwood in Gran Twittarino”.

Again, to get back to my personal experience, I usually do not live-tweet; I do not feel comfortable with it (I prefer to take notes in my notebook, even if I might also write a post on my blog later on, and I always ask the speakers if I can quote what they said). “Some worried that having someone tweet their insights before they publish might increase the likelihood that they will be scooped by a colleague — although others regarded that notion as slightly paranoid”, as mentioned in Kolowich (2012). “The debate over live tweeting at conferences is, in many ways, about control and access: who controls conference space, presentation content, or access to knowledge?” wrote Risam (2012). Beyond those ethical considerations, there is also a more practical reason: in mathematics, it is quite difficult to live-tweet. It is difficult to write equations on Twitter, and mentioning a graph without the formal model is useless (or misleading). There might be some interest when there are computational issues, or to share a nice visualization, for instance.

  • Twitter as an Online Faculty Lounge


It is also possible to start discussions on Twitter, but they have to remain short (to ask for precisions and references). An important benefit of Twitter is that it speeds up connections between scientists. But there is nothing new here. Traditionally, scientists have always interacted with other scientists, in one-to-one interactions, attending seminars and conferences, and discussing with colleagues. Using the words of Priem & al. (2013), “informal conversations have moved out of the faculty lounge to online social media platforms”, such as Twitter. One of the interests is to join, somehow, a larger “virtual department”, with colleagues who are not next door, but who might be far away and in other areas of research. One can even discuss with real people, outside academia. Since I have interests in risk modeling, finance, climate, computer science, and mathematics, I can also discuss with people working on stock markets, in insurance companies, in data visualization startups, even journalists. An important point is that it becomes possible to interact with open-minded researchers. As mentioned in Fox (2012) – slightly changing the title – “blogging [and microblogging] changed how economists share ideas”.

The first step in the scientific process is to find ideas, new concepts to investigate, datasets to describe. Following interesting people on Twitter can be that first step. The final step is to communicate findings and to disseminate. The time when researchers were studying the tables of contents of journals to find interesting articles is behind us. When disseminating on a blog, we can share codes, graphs, datasets, links to additional material. On Twitter, we have to deal with the 140-character constraint, which makes it hard. One idea can be to use a nice visualization, a graph, or a map.

  • Institutional Accounts and Outstitutional Ones

On Twitter, I mainly follow researchers, and only a few institutional accounts. For instance, following @HarvardBiz is interesting to get updates about their blog, and recent articles (like using RSS feeds). But most of those accounts are based on non-explicit rules, and controversial posts and articles are not mentioned. Still, there might be interesting experiences. Recently, the academics behind the Nature Chemistry Twitter account looked back on four years of experience (in Nature Chemistry (2013)). Among the lessons learnt, collectively, it was mentioned that with the 140-character constraint, “clarity is a virtue”, which is indeed one of the things you learn with Twitter. Furthermore, they mention that following important researchers in your scientific community on Twitter is interesting: not only can you discover interesting information, but when following personal accounts, one can also learn more personal information.

  • The Impact of Twitter in Standard Process of Academic Dissemination

Another possible motivation for researchers to be active on Twitter might be dissemination. Using Twitter can help to reach other researchers, outside the (small) circle of one’s community, as well as journalists, or people working in industry, for governmental organizations, even in politics. In a large study, Shuai & al. (2012) showed (following 4,600 papers) that papers mentioned on Twitter are more downloaded, and more cited (see also Eysenbach (2011) for a similar conclusion).

  • Blogging After Almost Six Years

After more than five years as an active blogger, I have the same diagnosis as Kotsko (2006), who mentions that blogging is “especially great for academics who would otherwise be quite isolated from other academics with similar interests”.

  • A Peaceful Island within Academia

One of the main personal reasons why I am still blogging after almost six years, with enthusiasm, is that it is still a lot of fun. And it is a place that I appreciate all the more because academia is currently a very competitive place. I interact with the blogging community because I want to, while (most of the time) I interact with students and colleagues because I have to. Blogs are probably the last place where we have complete freedom, even the freedom not to blog, if we do not want to.

There are currently two important issues in academia, when talking about money: rising tuition fees, and decreasing public funding for research. These past years, (undergraduate) students became consumers, and as a professor, I am now the seller in the store. Most of our students are no longer interested in the story behind a model; they want recipes for their future job. They want to know what to use, and when. And since universities evaluate their professors, they ask students to fill in evaluation forms. So professors do everything to get good evaluations. That is a simple and extremely rational game. But the pleasure of teaching is lost, sometimes. And similarly, the less money there is to do research, the more competition there is. If there are only one or two grants in my field of research, in Québec, I no longer have colleagues and friends, I have only competitors. We now have to work on many different tasks to add lines to our resume. As mentioned previously, it is difficult to find time just to discover something new, and spend some time investigating (without the guarantee of getting a publication out of it).

Compared with those conventional academic activities (teaching, doing research, writing a referee report, filling in a form for a grant, etc.), blogging is fun. Within the blogosphere, there is no competition, just motivation and stimulation. One can interact with other bloggers, learn from them, and so far, it is still a pleasure to blog. Some bloggers claim that it is a shame that blogging is not (formally) recognized within academia, but actually, it might be seen as a great opportunity: we blog because we want to, it is not another required task. So it can still be fun…

  • Readers of the Blog

As claimed in Walker (2006), “blogs (…) are inherently social. Whether you have five readers or five hundred doesn’t really matter, it’s the knowledge that you will be read that is important”. So, I have to confess that I blog mainly for myself, in the sense that I do not want to have a readership waiting for me to post something every day. Also in the sense that my blog gives me complete freedom to talk about things I find fun, with a whole-person style (and actually I do use my blog to develop my own “writing voice”, as Walker (2006) calls it), discussing personal issues. I remember when I started blogging: at first, I thought that no one would read my blog, and then, friends and colleagues told me that they were. It is actually thrilling (not to say scary) to have 5,000 or 10,000 readers for a blog post, when one thinks about the number of readers of academic articles. As claimed by O’Connor (2007), “blogs are a more effective medium for intellectual influence than journal articles”. Somehow, it looks like academic journals try to avoid exposure: publishing an article in the Journal of Narrowly Focused Hyper Specialized Field Studies is a great place to hide one’s research.

The process of research is, indeed, a social activity, where we need to keep – and create – interactions with various researchers, read papers, and keep our minds open. On the other hand, blogging is definitely a personal activity: I use my blog as a notebook, to keep track of ideas, codes, even graphs and interesting readings. From this perspective, blogging is extremely personal. But it is open, and anyone can access it. So I use my blog to promote my work, and my scholarship. Like Gregg (2009), I see academic “blogging as conversational scholarship”. Blogs are great for encouraging conversation; they are coffee houses, in the sense of seventeenth-century England. In blog posts, we connect to other blogs, using comments, reactions, and hyperlinks. Actually, Derek de Solla Price (see Price (1986), cited in Horne (2011)) explained that “the prototype of the modern scientific paper is a social device rather than a technique for accumulating quanta of information”. In that sense, having informal discussions is probably the best way to work as an academic. This is also the idea of Crane (1989): “the growth of scientific knowledge is a kind of diffusion process in which ideas are transmitted from person to person”. Using blogs, we can develop and connect a network of various people, from PhD students to practitioners in industry, as well as more experienced academics who might share common interests. The blog is read by students, former students, colleagues, probably the dean, and sometimes even the department secretary.

  • A Cost-Benefit Analysis

With a simple utilitarian cost-benefit analysis, one will probably blog if the benefits outweigh the costs. One component is related to time: does blogging cost time, or save it? Academics frequently receive emails (from students, former students, or anyone, actually) asking for explanations of a technical question that require a detailed answer. It is possible to consider a “reply in public”, with a blog post. Similarly, while teaching, the same questions come up every year. From my own experience, the first year I can write the answer in a blog post, and then integrate it into my notes (blog posts can even be more interesting than lecture notes). I cannot believe that blogging is a waste of time, since I see my blog as a long-term memory. It is also possible to integrate material that cannot be used in notes, such as animations or videos.

A lot of academics claim that they “do not have time to waste blogging”, yet some of them write extremely long, detailed (and most of the time, nicely structured) email replies. If I write a detailed reply to a specific question (because I found the question interesting), why not share it? Either the answer is wrong (or imprecise), in which case someone might post a comment, add a reference, a link, etc.; or the answer is correct, and then, again, why not share it? With blogs, dissemination is immediate, and so are comments and feedback.

A lot of researchers within academia still reject blogs because they are not serious, and not peer reviewed. But a casual style does not mean that the study itself is not serious: it can still follow a scientific procedure (this is what we can learn from the Ig Nobels). Furthermore, blogs will be peer reviewed as soon as peers blog. On blogs, comments can even be more constructive than the comments one gets from referees in a peer-reviewed journal. Blog posts are published on the (open) web, not in some journal so expensive that no one can actually read it. Yes, commenting is not as formal or rigorous as peer review, but having open comments can help establish the quality and credibility of a blog. Having comments from the community is a great benefit.

  • Conclusion

With recent changes in academia, blogs are a perfect place to interact outside institutions (universities, societies and journals). Academics can find there not only a place for free speech, but also a place to interact with scholars outside the circle of hyper-specialized colleagues. Anne-Marie Slaughter recently said that “all the disciplines have become more and more specialized and more and more quantitative, making them less and less accessible to the general public” (quoted in Kristof (2014)). Blogs are the missing link between jargon-heavy papers and oversimplified journalistic articles, somewhere between the conventional academic article, the lab notebook, and the seminar talk. But one should keep in mind that “academic blogging can be an important medium, when it avoids the meta-narcissistic onanism of blogging about how important academic blogging is”, as claimed by Parr (2012).

  •  References

Adams, R. (2013). Blogging in context: Reviewing the academic library blogosphere. (D. 10.1108/EL-05-2012-0054, Éd.) Electronic Library , 31, 664-667.
Ashlin, A., & Ladle, R. (2006). Science communication: Environmental science adrift in the blogosphere. (D. 10.1126/science.1124197, Éd.) Science , 312.
Batts, S., Anthis, N., & Smith, T. (2008). Advancing science through conversations: Bridging the gap between blogs and the academy. (DOI:10.1371/journal.pbio.0060240, Éd.) PLoS Biology .
Bell, S. (2007, 11 1). To Blog Or Not To Blog – That Is An Academic’s Question. 02 01, 2014, http://acrlog.org/2007/11/01/to-blog-or-not-to-blog-that-is-an-academics-question/
Bik, H., & Goldstein, M. (2013). An Introduction to Social Media for Scientists . (D. 10.1371/journal.pbio.1001535, Éd.) PLoS Biol , 11.
Boyd, D., & Ellison, N. (2007). Social network sites: Definition, history and scholarship. (D. 10.1104/pp.64.6.1070, Éd.) J Comput Mediat Commun , 13.
Brett, J. (1991). The Bureaucratization of Writing: Why so Few Academics are Public Intellectuals. Meanjin , 50, pp. 513-522.
Carmichael, M. (2010, 07 07). The Little Flaw in the Longevity-Gene Study That Could Be a Big Problem. http://www.newsweek.com/little-flaw-longevity-gene-study-could-be-big-problem-74703
Charpentier, A., Coulmont, B., & Gombin, J. (2014, 02 11). Un homme, deux voix : le vote par procuration. 02 11, 2014, La Vie des idées: http://www.laviedesidees.fr/Un-homme-deux-voix-le-vote-par.html
Chatterjee, P., & Biswas, T. (2011). Blogs and Twitter in medical publications – too unreliable to quote, or a change waiting to happen? (D. 10.7196/samj.5213., Éd.) South African Medical Journal , 101, 712-714.
Nature Chemistry (2013). All you can tweet. Nature Chemistry, 5.
Cohen, D. (2006, 08 21). Professors, Start Your Blogs. http://www.dancohen.org/2006/08/21/professors-start-your-blogs/ .
Conole, G. (2007, 10 20). The nature of academic discourse. 02 01, 2014,  http://e4innovation.com/?p=45
Conole, G. (2007, 10 29). The paper vs. blog argument…. 02 01, 2014,  http://e4innovation.com/?p=56
Cottrell, R. (2013, 02 15). Net wisdom.  02 01, 2014, http://www.ft.com/intl/cms/s/2/009050e4-75ea-11e2-9891-00144feabdc0.html
Crane, D. (1989, 02 20). How Scientists Communicate. 02 01, 2014, http://garfield.library.upenn.edu/classics1989/A1989AU43700001.pdf
Emanuel, K. (1986). Nuclear winter: Towards a scientific exercise. Nature , 319.
Esarey, J. (2013, 08 11). Blogs and Academic Tenure. 02 01, 2014,  http://politicalmethodology.wordpress.com/2013/08/11/blogs-and-academic-tenure/
Ewins, R. (2005). Who are you? Weblogs and academic identity. E-Learning, 2(4), 368–377. doi:10.2304/elea.2005.2.4.368
Eysenbach, G. (2011). Can Tweets Predict Citations? Metrics of Social Impact Based on Twitter and Correlation with Traditional Metrics of Scientific Impact. J Med Internet Res , 13.
Fister, B. (2012, 07 12). Serial Scholarship: Blogging as Traditional Academic Practice. 02 01, 2014, http://www.insidehighered.com/blogs/library-babel-fish/serial-scholarship-blogging-traditional-academic-practice
Fitzpatrick, K. (2010). Planned Obsolescence. Information Standards Quarterly , 22, pp. 14-19.
Fox, J. (2012). Can blogging change how ecologists share ideas? In economics, it already has. (D. 10.4033/iee.2012.5b.15.f, Éd.) Ideas in Ecology and Evolution , 5.
Gordon, L. (1998, 09). Tenure on Trial. 02 01, 2014, http://news.stanford.edu/stanfordtoday/ed/9809/9809fea201.shtml
Graham, T. (2004). Scholarly Communication . Serials: The Journal for the Serials Community , 13, 3-11.
Gregg, M. (2009). Blogging from the Ivory Tower Hot-Desk. (D. 10.1177/1354856509342345, Éd.) Convergence , 15, 470-483.
Gregg, M. (2006). Feeling Ordinary: Blogging as conversational scholarship. (DOI: 10.1080/10304310600641604) http://espace.library.uq.edu.au/eserv.php?pid=UQ:7740&dsID=FeelingOrdinary.htm
Grollman, E. (2014, 02 04). Blogging for (a) change. 02 06, 2014, http://conditionallyaccepted.com/2014/02/04/blogging-for-a-change/.
Gu, F., & Widén-Wulff, G. (2010). Scholarly Communication and Possible Changes in the Context of Social Media: a Finnish Case Study. . The Electronic Library , 29, pp. 762-776.
Halavais, A. (2006). Scholarly blogging: Moving towards the visible college. In A. Bruns & J. Jacobs (Eds.), Uses of Blogs (pp. 117–126). New York: Peter Lang.
Halford, J. (2012, 07 19). No Academic is an Island. http://earlymoderndialogues.wordpress.com/2012/07/19/no-academic-is-an-island/
Halliday, L. (2001). Scholarly communication, scholarly publication and the status of emerging formats. Information Research. , 6.
Herndon, T. (2013, 05 22). The Grad Student Who Took Down Reinhart And Rogoff Explains Why They’re Fundamentally Wrong. 02 01, 2014, Business Insider: http://www.businessinsider.com/herndon-responds-to-reinhart-rogoff-2013-4
Honeycutt, C., & Herring, S. (2009). Beyond Microblogging: Conversation and Collaboration via Twitter. Proceedings of the Forty-Second Hawai’i International Conference on System Sciences .
Horne, D. (2011). Research as a Social Process: Considerations for Academic Libraries. Faculty of Information Quarterly , 3.
Jenkins, H. (2008, 04 08). Why Academics Should Blog….  02 01, 2014,  http://henryjenkins.org/2008/04/why_academics_should_blog.html
Jenkins, R. (2013, 08 08). What’s a Blog Post Worth?  02 01, 2014,  http://chronicle.com/blogs/onhiring/whats-a-blog-post-worth/40591
Johnson, S. (1759). The History of Rasselas, Prince of Abissinia.
Jones, K. (2013, 11 20). Should You Blog Anonymously? 02 01, 2014,  http://www.blogherald.com/2013/11/20/blog-anonymously/
Jurgenson, N. (2012, 01 23). How Academics Can Become Relevant. 02 01, 2014,  http://thesocietypages.org/cyborgology/2012/01/23/how-academics-can-become-relevant/
Kaufman, S. (2007, 11 1). An Enthusiast’s View of Academic Blogs. 02 01, 2014,  http://www.insidehighered.com/views/2007/11/01/kaufman
Kirkup, G. (2010). Academic blogging: Academic practice and academic identity. (D. 10.1080/14748460903557803, Éd.) London Review of Education , 8.
Kolowich, S. (2012, 10 02). The Academic Twitterazzi. 02 01, 2014,  http://www.insidehighered.com/news/2012/10/02/scholars-debate-etiquette-live-tweeting-academic-conferences
Kotsko, A. (2007, 11 1). A Skeptic’s Take on Academic Blogs . 02 01, 2014,  http://www.insidehighered.com/views/2007/11/01/kotsko
Kotsko, A. (2006, 05 23). On Academic Blogging: A Diagnosis. 02 01, 2014,  http://kotsko.blogspot.ca/2006/05/on-academic-blogging-diagnosis.html
Kristof, T. (2014, 02 16). Professors, We Need You!  02 16, 2014,  http://www.nytimes.com/2014/02/16/opinion/sunday/kristof-professors-we-need-you.html
Laden, G. (2008, 12 1). Anonymity & Credibility.  02 01, 2014,  http://scienceblogs.com/gregladen/2008/12/01/anonymity-credibility/
Lariv, M. (2013, 01 29). How Blogging Helped Me Write My Dissertation. 02 01, 2014,  http://chronicle.com/article/How-Blogging-Helped-Me-Write/136893/
Leo, C. (2008). Why the academic world needs blogs. 0 2 01, 2014,  http://christopherleo.com/about/why-the-academic-world-needs-blogs/
Letierce, J., Passant, A., Decker, S., & Breslin, J. (2010). Understanding how twitter is used to spread scientific messages. Web Science Conference,. Raleigh, NC.
Lupton, D. (2012, 05 22). Where are all the sociology blogs? 02 01, 2014,  http://simplysociology.wordpress.com/2012/05/22/where-are-all-the-sociology-blogs
Luzón, M. (2006). Research group blogs: sites for self-presentation and collaboration. 5th AELFE Conference. http://www.unizar.es/aelfe2006/ALEFE06/5.newtechnologies/87.pdf.
Luzón, M. (2009). Scholarly Hyperwriting:The Function of Links in Academic Weblogs. (D. 10.1002/asi.20937, Éd.) Journal of the American Society for Information Science and Technology , 60.
MacArthur, D. (2011, 07 21). Longevity genetics study retracted from Science. 02 01, 2014,  http://www.wired.com/wiredscience/2011/07/longevity-genetics-study-retracted-from-science/
Maitzen, R. (2012). Scholarship 2.0: Blogging and/as academic practice. (D. 10.1080/13555502.2012.689502, Éd.) Journal of Victorian Culture , 17.
Maron, N., & Smith, K. (2006, 11 06). Scholarly Communication.  02 01, 2014,  http://www.arl.org/sc/models/model-pubs/pubstudy/index.shtml
Marsh, A. (2013, 01 28). The boundaries of academic blogging. 02 01, 2014,  http://blogs.lse.ac.uk/impactofsocialsciences/2013/01/28/the-boundaries-of-academic-blogging/
McMillan Cottom, T. (2012, 09 30). An Idea is a Dangerous Thing to Quarantine #twittergate. 02 01, 2014, http://tressiemc.com/2012/09/30/an-idea-is-a-dangerous-thing-to-quarantine-twittergate/
Merton, R. (1942). A Note on Science and Democracy. Journal of Legal and Political Sociology , 1, 115-126.
Mewburn, I., & Thomson, P. (2013). Why do academics blog? An analysis of audiences, purposes and challenges. (D. 10.1080/03075079.2013.835624, Éd.) Studies in Higher Education , 38, 1105-1119 .
Mortensen, T., & Walker, J. (2002). Blogging Thoughts: Personal Publication as an Online Research Tool. In A. Morrison (Ed.), Researching ICTs in Context. University of Oslo Press. http://possibleworlds.blogs.com/blogsperiment/files/Researching_ICTs_in_context-Ch11-Mortensen-Walker.pdf
Nicholas, D., & Rowlands, I. (2011). Social Media Use in the Research Workflow. Information Services and Use, 31, 61–83.
Noel, H. (2013, 08 09). What is tenure for? . 02 01, 2014, http://mischiefsoffaction.blogspot.ca/2013/08/what-is-tenure-for.html
O’Connor, B. (2007, 03 27). Seth Roberts and academic blogging. 02 01, 2014, http://brenocon.com/blog/2007/03/seth-roberts-and-academic-blogging/
Oreskes, N., & Conway, E. (2011). Merchants of Doubt: How a Handful of Scientists Obscured the Truth on Issues from Tobacco Smoke to Global Warming. Bloomsbury Press.
Park, H., & Thelwall, M. (2006). Web-science communication in the age of globalization. (D. 10.1177/1461444806065660, Éd.) New Media & Society , 629-650.
Parr, C. (2012, 11 1). Blog-standard turn-offs for social media neophytes. 02 1, 2014, http://www.timeshighereducation.co.uk/421669.article
Porst, S. (2003, 04 16). Why weblogs are rarely used to document research (2). 02 01, 2014, mathemagenic: http://blog.mathemagenic.com/2003/04/16.html
Price, D. (1986). Little Science, Big Science… and Beyond. Columbia University Press.
Priem, J., & Costello, K. (2010, 10 22). How and why scholars cite on Twitter.02 01, 2014, http://jasonpriem.org/self-archived/Priem_Costello_Twitter.pdf
Priem, J., Piwowar, H., & Hemminger, B. (2012, March 20). Altmetrics in the wild: Using social media to explore scholarly impact. arXiv:1203.4745.http://arxiv.org/abs/1203.4745.
Quiggin, J. (2011a). Economic Blogs. (D. 10.1111/j.1759-3441.2011.00148.x, Éd.) Economic Papers , 30, 437-440.
Quiggin, J. (2011b). Economic Blogs and Blog Economics. In A. Bruns & J. Jacobs (Eds.), Uses of Blogs.
Reid, A. (2011). On the value of academic blogging . http://alex-reid.net/2011/03/on-the-value-of-academic-blogging.html .
Riesch, H., & Mendel, J. (2014). Science Blogging: Networks, Boundaries and Limitations. Science as Culture, 23(5), 51–72. doi:10.1080/09505431.2013.801420
Risam, R. (2012, 09 30). Conference Live Tweets: Twitter Good or #Twittergate? 02 01, 2014, http://roopikarisam.com/2012/09/30/conference-live-tweets-twitter-good-or-twittergate/
Sebastiani, P., & al., e. (2010, 07 01). Genetic signatures of exceptional longevity in humans. Science .
Shema, H., Bar-Ilan, J., & Thelwall. (2012, 05 11). Research Blogs and the Discussion of Scholarly Information . (D. 10.1371/journal.pone.0035869, Éd.) PLOS One .
Shuai, X., Pepe, A., & Bollen, J. (2012). How the Scientific Community Reacts to Newly Submitted Preprints: Article Downloads, Twitter Mentions, and Citations. DOI: 10.1371/journal.pone.0047523.
Siemens, G. (2007, 10 06). Academic bloggers. 02 01, 2014, on http://www.elearnspace.org/blog/2007/10/06/academic-bloggers/
Silva, L. (2013, 05 12). So You Want to Blog (Academic Edition) . From http://www.insidehighered.com/blogs/university-venus/so-you-want-blog-academic-edition
Snow, C. (1963). The Two Cultures: A Second Look. Cambridge University Press.
Tenopir, C., Volentine, R., & King, D. (2013). Social media and scholarly reading. (D. 10.1108/OIR-04-2012-0062, Éd.) Online Information Review , 193-216.
Veletsianos, G. (2012). Higher education scholars’ participation and practices on Twitter. (D. 10.1111/j.1365-2729.2011.00449.x, Éd.) Journal of Computer Assisted Learning , 28, 336-349 .
Wagers, S. (2012, 12 07). Don’t Have Time to Tweet-bollocks! Twitter can even save you time as a scientist. 02 01, 2014, http://mendelspod.com/blog/dont-have-time-to-tweet-bollocks
Walker, J. (2006). Blogging from inside the ivory tower. In A. Bruns & J. Jacobs (Eds.), Uses of Blogs. New York: Peter Lang.
Walker, J. (2009). Weblog. In D. Herman et al. (Eds.), Routledge Encyclopedia of Narrative Theory. Taylor & Francis.
Weller, K., & Puschmann, C. (2011). Twitter for scientific communication: How can citations/references be identified and measured? Web Science Conference 2011, Germany. http://journal.webscience.org/500/1/153_paper.pdf
Wilcox, C. (2012). It’s time to e-volve: Taking responsibility for science communication in a digital age. (http://www.biolbull.org/content/222/2/85.full, Éd.) Biological Bulletin , 222, 85-87.
Wilkins, J. (2008). The roles, reasons and restrictions of science blogs. (D. 10.1016/j.tree.2008.05.004, Éd.) Trends Ecol Evol , 23.
Wilkinson, D., Harries, G., Thelwall, M., & Price, E. (2003). Motivations for academic web site interlinking: evidence for the Web as a novel source of information on informal scholarly communication . (D. 10.1177/016555150302900105, Éd.) Journal of Information Science , 29.
Ylijoki, O.-H. (2013). Boundary work between work and life in the high-speed university. (D. 10.1080/03075079.2011.577524, Éd.) Studies in Higher Education , 38, 242-255.
Ziman, J. (1996). Post-Academic Science: Constructing Knowledge With Networks and Norms. Science Studies, 9, 67–80.

Ἀγεωμέτρητος μηδεὶς εἰσίτω

as Plato said, if the legend is to be believed: “let no one ignorant of geometry enter”. I could also have called this post “looking back on a misunderstanding”. Thursday evening’s debate on Big Data was somewhat unsettling. To go back over the whole story (I had promised to write a post about everything I might learn during the evening), I was contacted a few weeks ago to take part in a debate on Big Data. I accepted, and suggested also inviting Yves-Alexandre, whom I had never met, but whose studies I had seen and whom I followed on Twitter. After discussing with Sophie, we agreed that it would be useful to have complementary speakers, familiar in particular with networks and network law, or able to talk about data visualization or data journalism. So Sophie contacted Vincent Gautrais and Jean-Hugues Roy, which broadened the panel and avoided having only the statisticians’ point of view. I discovered the title when I saw the poster, and told myself that, from a communication standpoint, associating Big Data with Big Brother was a strategy that could probably be justified. And indeed, within a few hours all the seats had been booked. Since then, I admit I have worked a lot, because I was anxious about debating such a technical subject in a building on the science campus. I wanted to avoid imprecision about the data used in climatology and meteorology. I wanted to avoid getting muddled if we discussed the technical aspects of parallelizing algorithms. I wanted to avoid being confused if we talked about causal inference (even if, on that point, Yves-Alexandre could have enlightened the debate). But I had not imagined that the debate would revolve around Big Brother, the feeling that one’s privacy is being violated, and the anxieties this causes. In hindsight, I realize that I was probably an impostor in this “debate” (I was not the only one to realize it, given some of the reactions we received afterwards), because it would have been better to have a psychologist, or a sociologist, to talk about collective psychosis in the face of technology, or about the feeling of having one’s privacy violated. I also think that a desktop-software specialist, to explain how to disable targeted-advertising options in Google, would have been useful, since several remarks were about that point. That said, I put “debate” in quotation marks because many interventions from the audience were comments rather than questions addressed to the four invited “experts” (but perhaps that is a standard way of debating in some venues). I admit I was surprised that, in a debate held in an academic setting, the audience applauded a partisan speech, a form of testimony. As I said, I am not used to taking part in this kind of public debate, and I found it disconcerting on several occasions. As an aside, while talking with Sophie, who moderated the debate, I had the impression that an opposition would be drawn between “hard sciences” and “soft sciences” (the mathematicians versus the journalist and the legal scholar), but in the end what we mostly felt was animosity from some people in the room (several of whom spoke up).
To illustrate the misunderstanding mentioned at the beginning, I will simply troll myself and offer the testimony of one of the speakers, by posting in the comments a message that he took the liberty of sending to the four of us on Friday afternoon.

Now, there were a few comments (and even a few relevant questions) that I wanted to come back to, and some points to mention that came up when the discussion continued among the four of us (with the other speakers, in a real bar this time, not a “Bar des Sciences”). To introduce the debate, I had wanted to mention an example that has been widely discussed in recent days (I am thinking of Who owns real-time sports data?), and which seemed to me a perfect way in. A basketball game looks like this, for instance, with several players moving fast, some attacking, others defending, and a ball.

Now, all of the action ends up as data, through cameras that digitize everything. We go from live, three-dimensional observations evolving over time to “data”, what I would call complex data. On top of that, players are recognized and can be tracked, even when they move (very) fast.

With a little drawing, we can carry out a spatio-temporal analysis of the play (and analyse decision-making).

If we believe Wikipedia, Big Data is about the 3Vs: volume (large volumes of data: here we start from video images, which are analysed and from which information is extracted at a tenth-of-a-second resolution), velocity (the goal is to run analyses in real time, to understand players’ decisions, such as shooting or passing the ball to a teammate) and variety (we have all sorts of data indeed, with locations, player names and player attributes, since we have access to the players’ statistics: we know who is good at three-pointers, who defends well). In short, I wanted to illustrate Big Data with this example, which seems representative to me (even if a bit idyllic, since few people have access to such incredible data). That said, these data are observations of consenting individuals, which seems far removed from the concerns of those who wanted the debate to be about privacy.

Among the points I noted, Vincent mentioned a talk by Michel Serres that is indeed food for thought,

Afterwards, we were able to talk at length about what data anonymity means (which was unfortunately barely touched upon during the debate itself), coming back to Unique in the Crowd: The privacy bounds of human mobility, a study Yves-Alexandre took part in, about metadata, and the fact that “human mobility traces are highly unique. In fact, in a dataset where the location of an individual is specified hourly, and with a spatial resolution equal to that given by the carrier’s antennas, four spatio-temporal points are enough to uniquely identify 95% of the individuals” (see also MIT News on this).

The discussion on metadata was fascinating: metadata are sufficient for simple studies (such as Google’s flu study, mentioned in a previous post that I wrote while preparing for the debate; one can also think of the real-time maps showing the state of road traffic). But this study calls a lot of things into question regarding the anonymity of metadata. A fascinating topic, if ever there was one!

Unfortunately, I did not take many notes, so if anyone did, with interesting examples or comments, I would be happy to relay them.

Data, Datum

Tonight, I am leaving my hermit’s cabin to speak at the Big Data: Big Brother conference organised at the Cœur des Sciences, where a little over three hundred people registered within a few hours.

We will talk about data. And since then, I have been preparing as much as possible. As the article Data is data, or are they? recalls,

Data emerged in 1646 as the plural of the Latin datum, which according to the Oxford English Dictionary was the past participle of dare (“give”) and meant “a thing given or granted; a thing known or assumed as a fact, and made the basis of reasoning or calculation; a fixed starting point for a series of measurements etc.” Datum remains standard and retains the general meaning of “a unit of information”, though it tends to appear mostly in academic and specialist disciplines such as philosophy, surveying, geodesy, topography, technical drawing, and cartography. The meaning of the derived plural data has changed somewhat over the centuries. The OED definition from the late 19th century (“Facts, esp. numerical facts, collected together for reference or information”) seems to testify to the broadening influence of the hard sciences. In the 20th century, the rapidly expanding fields of information technology incorporated the word into a huge variety of IT- and computer-related compound nouns, such as database, data entry, data flow, data mining, data processing, data protection, and data stream. The plural data is used in many scientific, technical, academic and other formal contexts, though different practices prevail in different places. In computing jargon, social sciences, and everyday use, data is often treated as an abstract mass noun, like information. It has the general meaning “mass of information”

It is not in my habits to speak in front of so many people, for an open discussion, in Sophie’s words. I still prepared a couple of slides (in ppt) to illustrate what big data is. More tonight (or in a future post, if I manage to take enough notes as the talks go along).

Temperatures Series as Random Walks

Last year, I mentioned in a post that unit-root tests are dangerous, because they might lead us to strange models. For instance, in a post, I found that the temperature observed in January 2013, in Montréal, might be considered as a random walk (or at least an integrated process). The code to extract the data has changed (since the website has been updated), so here, we use

library(RCurl)
library(XML)
options(RCurlOptions = list(useragent = "R"))
HEURE=0:23
extracttemp=function(Y,M,D){
url=paste(
"http://climate.weather.gc.ca/climateData/hourlydata_e.html?timeframe=1&Prov=QC&StationID=5415&Year=",Y,"&Month=",
M,"&Day=",D,sep="")
wp <- getURLContent(url)
doc <- htmlParse(wp, asText = TRUE) 
docName(doc) <- url
tmp <- readHTMLTable(doc)
basejour=data.frame(Year=Y,Month=M,Day=D,
Hour=HEURE,Temp=as.numeric(as.character(data.frame(tmp[2])[,2]))[2:25])
return(basejour)}
B=NULL
for(y in 1955:2013){
for(d in 1:31){
B=rbind(B,extracttemp(y,1,d))}}

Here are all the observed temperatures (in light blue), with January 2013 highlighted in red,

B$X=B$Day+B$Hour/24   # time index within January (not in the scraped table, reconstructed for the plot)
plot(B$X,B$Temp,cex=.5,col="light blue",xlab="January, in Montreal",ylab="Temperature (Celsius)")
I=which(B$Year==2013)
lines(B$X[I],B$Temp[I],col="red")

In the previous post, only one test was used, and only one year was considered. I was wondering whether this behaviour was specific to the 2013 temperatures (or not), and how the other tests (also mentioned in a previous post) were performing.

I need a small function, because those tests cannot be used if there is even a single missing value. So I use the value observed one hour earlier (just to make sure that the tests can be run),

correcty=function(Y){
I=which(is.na(Y))                        # locate missing hourly observations
	if(length(I)==0){Yc=Y}
	if(length(I)>0){Yc=Y;for(i in I) Yc[i]=Yc[i-1]}   # carry the previous hour forward
return(Yc)
}
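
As a quick sanity check of this last-observation-carried-forward imputation (on a hypothetical toy vector):

correcty(c(3.2, NA, NA, 1.5))
# [1] 3.2 3.2 3.2 1.5   (the trick assumes the first hour of the month is never missing)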

Now, we can compute the p-values, for all the years, for the three different tests (keeping in mind that two of them, ADF and PP, test the null hypothesis of a unit root, i.e. non-stationarity, while KPSS tests the null hypothesis of stationarity),

DF=matrix(NA,2013-1954,3)
library(tseries)   # pp.test, kpss.test and adf.test are in the tseries package
for(y in 1955:2013){
Z=B$Temp[which(B$Year==y)]
	Zc=correcty(Z)
	DF[y-1954,2]=as.numeric(pp.test(Zc)$p.value)
	DF[y-1954,1]=as.numeric(kpss.test(Zc)$p.value)
	DF[y-1954,3]=as.numeric(adf.test(Zc)$p.value)
}

Visually, if red means stationary, and blue means non-stationary, we get

DFP=DF
DFP[,1]=DF[,1]<.05
DFP[,2:3]=DF[,2:3]>.05
library(RColorBrewer)
CL=brewer.pal(6, "RdBu")
plot(0:1,0:1,xlim=c(1950,2015),ylim=c(0,3),axes=FALSE,xlab="",ylab="")
axis(1)
text(1952,.5,"KPSS")
text(1952,1.5,"PP")
text(1952,2.5,"ADF")
for(y in 1955:2013){
for(i in 1:3){
polygon(y+c(-1,-1,1,1)/2.2,i-.5+c(-1,1,1,-1)/2.2,col=CL[1+(DFP[y-1954,i]==1)*5],border=NA)}}

Quite frequently, we conclude that the temperature is a random walk, which does not make sense from a physical point of view. But again, this might come from the fact that temperatures are stationary, with some fractional (long-memory) behaviour, as suggested in the previous post. A quick way to look at that alternative is sketched below.
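
As a minimal sketch (assuming the fracdiff package is available, and reusing B and correcty from above), one can estimate the fractional differencing parameter d for each January; values between 0 and 0.5 would point to a stationary but long-memory series rather than a unit root.

library(fracdiff)
DHAT=rep(NA,2013-1954)
for(y in 1955:2013){
Zc=correcty(B$Temp[which(B$Year==y)])
DHAT[y-1954]=fracdiff(Zc,nar=0,nma=0)$d   # estimated fractional differencing parameter
}
summary(DHAT)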

Unit Root Tests

This week, in the MAT8181 Time Series course, we’ve discussed unit root tests. According to Wold’s theorem, if $(Y_t)$ is (weakly) stationary, then

$$Y_{t}=\sum_{j=0}^{\infty}\psi_{j}\varepsilon_{t-j}+\xi_{t}$$

where $(\varepsilon_{t})$ is the innovation process, and where $(\xi_{t})$ is some deterministic series (just to get a result as general as possible). Observe that

$$\sum_{j=0}^{\infty}|\psi_{j}|^{2}<\infty$$

as discussed in a previous post. To go one step further, there is also the Beveridge-Nelson decomposition: an integrated process of order one, defined as

$$\Delta Y_{t}=(1-L)Y_{t}=\sum_{j=0}^{\infty}\psi_{j}\varepsilon_{t-j}+\xi=\Psi(L)\varepsilon_{t}+\xi$$

can be represented as

a linear trend $+$ a random walk $+$ a stationary remaining term

i.e.

$$Y_{t}=\underbrace{Y_0+\xi t}_{\text{linear trend}}+\underbrace{\Psi(1)\sum_{i=1}^{t}\varepsilon_{i}}_{\text{random walk}}+\underbrace{\tilde\Psi(L)\varepsilon_0-\tilde\Psi(L)\varepsilon_t}_{\text{stationary}}$$

where $\tilde\Psi(\cdot)$ is the polynomial with coefficients $\tilde\psi_j$, where

$$\tilde\psi_j=\sum_{i=j+1}^{\infty}\psi_i$$
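
The step that makes this decomposition work (worth stating explicitly) is the polynomial identity

$$\Psi(L)=\Psi(1)-(1-L)\tilde\Psi(L)$$

so that, summing $\Delta Y_s=\Psi(L)\varepsilon_s+\xi$ for $s=1,\dots,t$, the $(1-L)\tilde\Psi(L)\varepsilon_s$ terms telescope and leave exactly the three components above.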

For unit-root tests, we will use various representations of the process. In order to illustrate the implementation of those tests, consider the following series

> E=rnorm(240)
> X=cumsum(E)
> plot(X,type="l")
  • Dickey Fuller (standard)

Here, for the simple version of the Dickey-Fuller test, we assume that

$$Y_t=\alpha+\beta t+\varphi Y_{t-1}+\varepsilon_t$$

and we would like to test whether $\varphi=1$ (or not). We can write the previous representation as

$$\Delta Y_t=\alpha+\beta t+[\varphi-1]Y_{t-1}+\varepsilon_t$$

so we simply have to test whether the coefficient on $Y_{t-1}$ in this linear regression is null or not, which can be done with a Student t test. If we consider the previous model without the linear drift, we have to consider the following regression

> lags=0
> z=diff(X)
> n=length(z)
> z.diff=embed(z, lags+1)[,1]
> z.lag.1=X[(lags+1):n]
> summary(lm(z.diff~0+z.lag.1 ))

Call:
lm(formula = z.diff ~ 0 + z.lag.1)

Residuals:
     Min       1Q   Median       3Q      Max 
-2.84466 -0.55723 -0.00494  0.63816  2.54352 

Coefficients:
         Estimate Std. Error t value Pr(>|t|)
z.lag.1 -0.005609   0.007319  -0.766    0.444

Residual standard error: 0.963 on 238 degrees of freedom
Multiple R-squared:  0.002461,	Adjusted R-squared:  -0.00173 
F-statistic: 0.5873 on 1 and 238 DF,  p-value: 0.4442

Our testing procedure will be based on the Student’s t value,

> summary(lm(z.diff~0+z.lag.1 ))$coefficients[1,3]
[1] -0.7663308

which is exactly the value computed using

> library(urca)
> df=ur.df(X,type="none",lags=0)
> df

############################################################### 
# Augmented Dickey-Fuller Test Unit Root / Cointegration Test # 
############################################################### 

The value of the test statistic is: -0.7663

The interpretation of this value can be done using critical values (at the 1%, 5% and 10% levels); a first idea would be to use Gaussian quantiles,

> qnorm(c(.01,.05,.1)/2)
[1] -2.575829 -1.959964 -1.644854

If the test statistic is above those values (i.e., not negative enough), we cannot reject the hypothesis that $\varphi-1=0$, so we might conclude that there is a unit root and that the series is not stationary. Actually, the proper critical values (those of the Dickey-Fuller distribution, not the Gaussian one) are the ones reported by

> summary(df)

############################################### 
# Augmented Dickey-Fuller Test Unit Root Test # 
############################################### 

Test regression none 

Call:
lm(formula = z.diff ~ z.lag.1 - 1)

Residuals:
     Min       1Q   Median       3Q      Max 
-2.84466 -0.55723 -0.00494  0.63816  2.54352 

Coefficients:
         Estimate Std. Error t value Pr(>|t|)
z.lag.1 -0.005609   0.007319  -0.766    0.444

Residual standard error: 0.963 on 238 degrees of freedom
Multiple R-squared:  0.002461,	Adjusted R-squared:  -0.00173 
F-statistic: 0.5873 on 1 and 238 DF,  p-value: 0.4442

Value of test-statistic is: -0.7663 

Critical values for test statistics: 
      1pct  5pct 10pct
tau1 -2.58 -1.95 -1.62
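
Note that those tau1 critical values are not Gaussian quantiles: they are quantiles of the Dickey-Fuller distribution, which can be approximated by simulation under the unit-root null. A minimal sketch (the number of replications and the sample size are arbitrary):

> set.seed(1)
> nsim=2000
> nobs=240
> tstat=rep(NA,nsim)
> for(s in 1:nsim){
+ X0=cumsum(rnorm(nobs))          # a pure random walk, i.e. the null model
+ z=diff(X0)
+ zlag=X0[1:(nobs-1)]
+ tstat[s]=summary(lm(z~0+zlag))$coefficients[1,3]
+ }
> quantile(tstat,c(.01,.05,.1))   # should be close to the tabulated -2.58, -1.95, -1.62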

The problem with R is that there are several packages that can be used for unit root tests. Just to mention another one,

> library(tseries)
> adf.test(X,k=0)

	Augmented Dickey-Fuller Test

data:  X
Dickey-Fuller = -2.0433, Lag order = 0, p-value = 0.5576
alternative hypothesis: stationary

Here, we also have a test whose null hypothesis is that there is a unit root. But the p-value is quite different. What is odd is that we have

> 1-adf.test(X,k=0)$p.value
[1] 0.4423705
> df@testreg$coefficients[4]
[1] 0.4442389

(but I think it is a coincidence).

  • Augmented Dickey Fuller

It is possible to add some lags in the regression. For instance, we can consider

$$\Delta Y_t=\alpha+\beta t+[\varphi-1]Y_{t-1}+\psi\Delta Y_{t-1}+\varepsilon_t$$

Again, we have to check whether the coefficient on $Y_{t-1}$ is null or not, and this can be done using a Student t test.

> lags=1
> z=diff(X)
> n=length(z)
> z.diff=embed(z, lags+1)[,1]
> z.lag.1=X[(lags+1):n]
> k=lags+1
> z.diff.lag = embed(z, lags+1)[, 2:k]
> summary(lm(z.diff~0+z.lag.1+z.diff.lag ))

Call:
lm(formula = z.diff ~ 0 + z.lag.1 + z.diff.lag)

Residuals:
     Min       1Q   Median       3Q      Max 
-2.87492 -0.53977 -0.00688  0.64481  2.47556 

Coefficients:
            Estimate Std. Error t value Pr(>|t|)
z.lag.1    -0.005394   0.007361  -0.733    0.464
z.diff.lag -0.028972   0.065113  -0.445    0.657

Residual standard error: 0.9666 on 236 degrees of freedom
Multiple R-squared:  0.003292,	Adjusted R-squared:  -0.005155 
F-statistic: 0.3898 on 2 and 236 DF,  p-value: 0.6777

> summary(lm(z.diff~0+z.lag.1+z.diff.lag ))$coefficients[1,3]
[1] -0.7328138

This value is the one obtained using

> df=ur.df(X,type="none",lags=1)
> summary(df)

############################################### 
# Augmented Dickey-Fuller Test Unit Root Test # 
############################################### 

Test regression none 

Call:
lm(formula = z.diff ~ z.lag.1 - 1 + z.diff.lag)

Residuals:
     Min       1Q   Median       3Q      Max 
-2.87492 -0.53977 -0.00688  0.64481  2.47556 

Coefficients:
            Estimate Std. Error t value Pr(>|t|)
z.lag.1    -0.005394   0.007361  -0.733    0.464
z.diff.lag -0.028972   0.065113  -0.445    0.657

Residual standard error: 0.9666 on 236 degrees of freedom
Multiple R-squared:  0.003292,	Adjusted R-squared:  -0.005155 
F-statistic: 0.3898 on 2 and 236 DF,  p-value: 0.6777

Value of test-statistic is: -0.7328 

Critical values for test statistics: 
      1pct  5pct 10pct
tau1 -2.58 -1.95 -1.62

And again, other packages can be used:

> adf.test(X,k=1)

	Augmented Dickey-Fuller Test

data:  X
Dickey-Fuller = -1.9828, Lag order = 1, p-value = 0.5831
alternative hypothesis: stationary

Fortunately, the conclusion is the same (we cannot reject the unit-root hypothesis, so we do not conclude that the series is stationary; I am not sure, though, about the computation of the p-value).

  • Augmented Dickey Fuller with trend and drift

So far, we have not included the drift in our model. But this is simple to do: we just have to include a constant in the regression,

> summary(lm(z.diff~1+z.lag.1+z.diff.lag ))

Call:
lm(formula = z.diff ~ 1 + z.lag.1 + z.diff.lag)

Residuals:
     Min       1Q   Median       3Q      Max 
-2.91930 -0.56731 -0.00548  0.62932  2.45178 

Coefficients:
            Estimate Std. Error t value Pr(>|t|)  
(Intercept)  0.29175    0.13153   2.218   0.0275 *
z.lag.1     -0.03559    0.01545  -2.304   0.0221 *
z.diff.lag  -0.01976    0.06471  -0.305   0.7603  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 0.9586 on 235 degrees of freedom
Multiple R-squared:  0.02313,	Adjusted R-squared:  0.01482 
F-statistic: 2.782 on 2 and 235 DF,  p-value: 0.06393

The statistics of interest are obtained here from the regression output and from an analysis-of-variance comparison, where this model is compared with the one without the lagged level $Y_{t-1}$ and without the drift,

> summary(lm(z.diff~1+z.lag.1+z.diff.lag ))$coefficients[2,3]
[1] -2.303948
> anova(lm(z.diff ~ z.lag.1 + 1 + z.diff.lag),lm(z.diff ~ 0 + z.diff.lag))$F[2]
[1] 2.732912

Those two values are the ones obtained also with

> df=ur.df(X,type="drift",lags=1)
> summary(df)

############################################### 
# Augmented Dickey-Fuller Test Unit Root Test # 
############################################### 

Test regression drift 

Call:
lm(formula = z.diff ~ z.lag.1 + 1 + z.diff.lag)

Residuals:
     Min       1Q   Median       3Q      Max 
-2.91930 -0.56731 -0.00548  0.62932  2.45178 

Coefficients:
            Estimate Std. Error t value Pr(>|t|)  
(Intercept)  0.29175    0.13153   2.218   0.0275 *
z.lag.1     -0.03559    0.01545  -2.304   0.0221 *
z.diff.lag  -0.01976    0.06471  -0.305   0.7603  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 0.9586 on 235 degrees of freedom
Multiple R-squared:  0.02313,	Adjusted R-squared:  0.01482 
F-statistic: 2.782 on 2 and 235 DF,  p-value: 0.06393

Value of test-statistic is: -2.3039 2.7329 

Critical values for test statistics: 
      1pct  5pct 10pct
tau2 -3.46 -2.88 -2.57
phi1  6.52  4.63  3.81

And we can also include a linear trend,

> temps=(lags+1):n
> summary(lm(z.diff~1+temps+z.lag.1+z.diff.lag ))

Call:
lm(formula = z.diff ~ 1 + temps + z.lag.1 + z.diff.lag)

Residuals:
     Min       1Q   Median       3Q      Max 
-2.87727 -0.58802 -0.00175  0.60359  2.47789 

Coefficients:
              Estimate Std. Error t value Pr(>|t|)  
(Intercept)  0.3227245  0.1502083   2.149   0.0327 *
temps       -0.0004194  0.0009767  -0.429   0.6680  
z.lag.1     -0.0329780  0.0166319  -1.983   0.0486 *
z.diff.lag  -0.0230547  0.0652767  -0.353   0.7243  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 0.9603 on 234 degrees of freedom
Multiple R-squared:  0.0239,	Adjusted R-squared:  0.01139 
F-statistic:  1.91 on 3 and 234 DF,  p-value: 0.1287

> summary(lm(z.diff~1+temps+z.lag.1+z.diff.lag ))$coefficients[3,3]
[1] -1.98282
> anova(lm(z.diff ~ z.lag.1 + 1 + temps+ z.diff.lag),lm(z.diff ~ 1+ z.diff.lag))$F[2]
[1] 2.737086

while the R function returns

> df=ur.df(X,type="trend",lags=1)
> summary(df)

############################################### 
# Augmented Dickey-Fuller Test Unit Root Test # 
############################################### 

Test regression trend 

Call:
lm(formula = z.diff ~ z.lag.1 + 1 + tt + z.diff.lag)

Residuals:
     Min       1Q   Median       3Q      Max 
-2.87727 -0.58802 -0.00175  0.60359  2.47789 

Coefficients:
              Estimate Std. Error t value Pr(>|t|)  
(Intercept)  0.3227245  0.1502083   2.149   0.0327 *
z.lag.1     -0.0329780  0.0166319  -1.983   0.0486 *
tt          -0.0004194  0.0009767  -0.429   0.6680  
z.diff.lag  -0.0230547  0.0652767  -0.353   0.7243  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 0.9603 on 234 degrees of freedom
Multiple R-squared:  0.0239,	Adjusted R-squared:  0.01139 
F-statistic:  1.91 on 3 and 234 DF,  p-value: 0.1287

Value of test-statistic is: -1.9828 1.8771 2.7371 

Critical values for test statistics: 
      1pct  5pct 10pct
tau3 -3.99 -3.43 -3.13
phi2  6.22  4.75  4.07
phi3  8.43  6.49  5.47
  • KPSS test

Here, in the KPSS testing procedure, two models can be considered: one with a drift, and one with a linear trend. The null hypothesis is that the series is stationary.

With a drift, the code is

> summary(ur.kpss(X,type="mu"))

####################### 
# KPSS Unit Root Test # 
####################### 

Test is of type: mu with 4 lags. 

Value of test-statistic is: 0.972 

Critical value for a significance level of: 
                10pct  5pct 2.5pct  1pct
critical values 0.347 0.463  0.574 0.73

while, in the case where there is a trend, it will be

> summary(ur.kpss(X,type="tau"))

####################### 
# KPSS Unit Root Test # 
####################### 

Test is of type: tau with 4 lags. 

Value of test-statistic is: 0.5057 

Critical value for a significance level of: 
                10pct  5pct 2.5pct  1pct
critical values 0.119 0.146  0.176 0.216

One more time, it is possible to use another package to get the same test (but again, a different output)

> kpss.test(X,"Level")

	KPSS Test for Level Stationarity

data:  X
KPSS Level = 1.1997, Truncation lag parameter = 3, p-value = 0.01

> kpss.test(X,"Trend")

	KPSS Test for Trend Stationarity

data:  X
KPSS Trend = 0.6234, Truncation lag parameter = 3, p-value = 0.01

At least there is some kind of consistency, since we keep rejecting the stationarity assumption for that series.

  • Phillips-Perron test

The Phillips-Perron test is based on the Dickey-Fuller procedure, with a nonparametric correction of the statistic to account for serial correlation in the residuals. The code is here

> PP.test(X)

	Phillips-Perron Unit Root Test

data:  X
Dickey-Fuller = -2.0116, Truncation lag parameter = 4, p-value = 0.571

with again, a possible alternative with the other package

> pp.test(X)

	Phillips-Perron Unit Root Test

data:  X
Dickey-Fuller Z(alpha) = -7.7345, Truncation lag parameter = 4, p-value
= 0.6757
alternative hypothesis: stationary
  •  Comparison

I will not spend more time comparing the different R implementations of those tests. Let us instead spend some additional time on a quick comparison of the three procedures. Let us generate some autoregressive processes, with more or less autocorrelation, as well as some random walks, and see how those tests perform:

> n=100
> AR=seq(1,.7,by=-.01)
> P=matrix(NA,3,length(AR))
> M1=matrix(NA,1000,length(AR))
> M2=matrix(NA,1000,length(AR))
> M3=matrix(NA,1000,length(AR))

> library(tseries)
> for(i in 1:length(AR)){
+ for(s in 1:1000){
+ if(AR[i]==1) X=cumsum(rnorm(n))                # coefficient 1: a random walk
+ if(AR[i]!=1) X=arima.sim(n=n,list(ar=AR[i]))   # otherwise: a stationary AR(1)
+ M2[s,i]=as.numeric(pp.test(X)$p.value)
+ M1[s,i]=as.numeric(kpss.test(X)$p.value)
+ M3[s,i]=as.numeric(adf.test(X)$p.value)
+ }}

Here, for each value of the autoregressive coefficient, we would like to compute the proportion of simulated series classified as non-stationary at the 5% level (for KPSS this means a p-value below 5%, while for ADF and PP it means a p-value above 5%),

> prop05=function(x) mean(x>.05)
> P[1,]=1-apply(M1,2,prop05)   # KPSS: p-value below 5% = classified as non-stationary
> P[2,]=apply(M2,2,prop05)     # PP: p-value above 5% = classified as non-stationary
> P[3,]=apply(M3,2,prop05)     # ADF: p-value above 5% = classified as non-stationary
> plot(AR,P[1,],type="l",col="red",ylim=c(0,1),
+ ylab="proportion of non-stationary series",xlab="autocorrelation coefficient")
> lines(AR,P[2,],type="l",col="blue")
> lines(AR,P[3,],type="l",col="green")
> legend(.7,1,c("ADF","KPSS","PP"),col=c("green","red","blue"),lty=1,lwd=1)

 

We can see here how poorly the Dickey-Fuller test behaves, since at least 50% of our autoregressive processes are classified as non-stationary.

Proxy Voting in France

La Vie des Idées published this morning a short text, written by Baptiste Coulmont (a.k.a. @coulmont) and Joël Gombin (a.k.a. @joelgombin), to which I very modestly contributed, entitled “Un homme, deux voix. Le vote par procuration”.

While on his blog Baptiste has added quite a lot of information on proxy voting in France (and the general context, in particular why so many parties court certain voters by encouraging them to vote by proxy), and on the databases, I wanted to take the opportunity to post some of the code used in the article, and in particular to mention graphs that were not used because they are harder to interpret, but in my opinion more sound in terms of the model (since the conclusions were the same, we kept the more classical graphs). First, recall that we analyse the vote not from individual data (which cannot be obtained, since the ballot is still secret in France), but from the results of the various polling stations (this is the notion of ecological correlation mentioned in the text, because of the potential problem of ecological fallacy). With all the usual precautions in mind, we tried to analyse the data at our disposal.

  • Regression model, and search for explanatory variables

To run a regression and explain the proxy-voting rate in a polling station, my first idea was to assume that $Y_i\sim\mathcal{B}(n_i,p_i)$, where $Y_i$ is the number of proxy votes in polling station $i$, and where $n_i$ is (as one prefers) the number of registered voters or the number of voters who actually voted. We assume here that $p_i$, the proportion of voters who voted by proxy, may be a function of various explanatory variables. Those variables are in the following dataset (I refer to Baptiste’s blog for the databases we use, built in particular from insee.fr and opendata.paris.fr data)

> bt1=read.table("paris2007-pres-t1.csv",header=TRUE,sep=";")
> bt2=read.table("paris2007-pres-t2.csv",header=TRUE,sep=";")
> bv=read.table("paris-bv-insee-07.csv",header=TRUE,sep=";")
> bv$BV=bv$BVCOM
> baset1=merge(bt1,bv,by="BV")
> baset2=merge(bt2,bv,by="BV")
> baset1$LOGEMENT=baset1$PROPRIO+baset1$LOCNONHLM+baset1$LOCHLM+baset1$GRATUIT
> baset2$LOGEMENT=baset2$PROPRIO+baset2$LOCNONHLM+baset2$LOCHLM+baset2$GRATUIT

If we assume that $p_i$ is a function of $x_i$, the proportion of dwellings occupied by their owner in the neighbourhood (associated with a polling station),

> variable="PROPRIO"
> reference="LOGEMENT"
> baset1$taux=baset1[,variable]/baset1[,reference]
> baset2$taux=baset2[,variable]/baset2[,reference]

it is natural to try a logistic regression,

or even a spline smoother, if we suspect that the link may not be linear,

This is done with the following code, for the smoothed version (with cubic splines)

> b=hist(baset1$taux,plot=FALSE)
> library(splines)
> regt1=glm(PROCURATIONS/INSCRITS~bs(taux,6),family=binomial,weights=INSCRITS,data=baset1)
> regt2=glm(PROCURATIONS/INSCRITS~bs(taux,6),family=binomial,weights=INSCRITS,data=baset2)
> u=seq(min(baset1$taux)+.015,max(baset1$taux)-.015,by=.001)
> ND=data.frame(taux=u)
> ug=seq(0,max(baset1$taux)+.05,by=.001)
> pt1=predict(regt1,newdata=ND,se=TRUE,type="response")
> pt2=predict(regt2,newdata=ND,se=TRUE,type="response")
> library(RColorBrewer)
> CL=brewer.pal(6, "RdBu")
> plot(ug,ug*1,col="white",xlab=variable,ylab="Taux de procuration",
+ ylim=c(0,.1))
> for(i in 1:(length(b$breaks)-1)){
+ polygon(b$breaks[i+c(0,0,1,1)],c(0,b$counts[i],b$counts[i],0)
+ /max(b$counts)*.05,col="light yellow",border=NA)}
> polygon(c(u,rev(u)),c(pt1$fit+2*pt1$se.fit,rev(pt1$fit-2*pt1$se.fit)),
+ border=NA,density=30,col=CL[4])

and, for the standard (linear) logistic regression,

> lines(u,pt1$fit,col=CL[6],lwd=2)
> polygon(c(u,rev(u)),c(pt2$fit+2*pt2$se.fit,rev(pt2$fit-2*pt2$se.fit)),
+ border=NA,density=30,col=CL[3])
> lines(u,pt2$fit,col=CL[1],lwd=2)
> regt1l=glm(PROCURATIONS/INSCRITS~taux,family=binomial,weights=INSCRITS,data=baset1)
> regt2l=glm(PROCURATIONS/INSCRITS~taux,family=binomial,weights=INSCRITS,data=baset2)
> ND=data.frame(taux=ug)
> pt1l=predict(regt1l,newdata=ND,se=TRUE,type="response")
> pt2l=predict(regt2l,newdata=ND,se=TRUE,type="response")
> lines(ug,pt1l$fit,col=CL[5],lty=2)
> lines(ug,pt2l$fit,col=CL[2],lty=2)
> legend(0,.1,c("Second Tour","Premier Tour"),col=CL[c(1,6)],
+ lwd=2,lty=1,border=NA)

(adding a small legend, with a visualisation for the two rounds of the presidential election, and a confidence interval around the predicted proxy-voting rate).

We can do the same thing with the proportion of social housing (HLM) in the neighbourhood,

The plots are telling, but in the output of the regression model the interpretation of the coefficients leaves something to be desired (any suggestion is welcome! one possible reading is sketched after the outputs below).

> summary(regt1l)

Call:
glm(formula = PROCURATIONS/INSCRITS ~ taux, family = binomial, 
    data = baset1, weights = INSCRITS)

Deviance Residuals: 
     Min        1Q    Median        3Q       Max  
-12.9549   -1.5722    0.0319    1.6292   13.1303  

Coefficients:
            Estimate Std. Error z value Pr(>|z|)    
(Intercept) -3.70811    0.01516  -244.6   <2e-16 ***
taux         1.49666    0.04012    37.3   <2e-16 ***
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

(Dispersion parameter for binomial family taken to be 1)

    Null deviance: 12507  on 836  degrees of freedom
Residual deviance: 11065  on 835  degrees of freedom
AIC: 15699

Number of Fisher Scoring iterations: 4

> summary(regt2l)

Call:
glm(formula = PROCURATIONS/INSCRITS ~ taux, family = binomial, 
    data = baset2, weights = INSCRITS)

Deviance Residuals: 
     Min        1Q    Median        3Q       Max  
-15.4872   -1.7817   -0.1615    1.6035   12.5596  

Coefficients:
            Estimate Std. Error z value Pr(>|z|)    
(Intercept) -3.24272    0.01230 -263.61   <2e-16 ***
taux         1.45816    0.03266   44.65   <2e-16 ***
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

(Dispersion parameter for binomial family taken to be 1)

    Null deviance: 9424.7  on 836  degrees of freedom
Residual deviance: 7362.3  on 835  degrees of freedom
AIC: 12531

Number of Fisher Scoring iterations: 4
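
One way to read the logistic coefficients above (a hedged suggestion, not something used in the paper): on the logit scale, they act multiplicatively on the odds of a proxy vote. For instance, with the first-round coefficient, moving from a polling station with 20% owner-occupiers to one with 40%,

> exp(1.49666*(0.40-0.20))

multiplies the odds of voting by proxy by about 1.35, i.e. roughly 35% higher odds.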

 

We then wanted to compare with a model that seems less sound to me, but simpler to interpret, where we assume that the proxy-voting rate (per polling station) is explained by a linear model, $Y_i/n_i=\beta_0+\beta_1 x_i+\varepsilon_i$

(which we can also smooth, to check that the link is indeed linear). The code is here

> regt1=lm(PROCURATIONS/INSCRITS~bs(taux,6),weights=INSCRITS,data=baset1)
> regt2=lm(PROCURATIONS/INSCRITS~bs(taux,6),weights=INSCRITS,data=baset2)
> u=seq(min(baset1$taux)+.015,max(baset1$taux)-.015,by=.001)
> ND=data.frame(taux=u)
> ug=seq(0,max(baset1$taux)+.05,by=.001)
> pt1=predict(regt1,newdata=ND,se=TRUE,type="response")
> pt2=predict(regt2,newdata=ND,se=TRUE,type="response")
> library(RColorBrewer)
> CL=brewer.pal(6, "RdBu")
> plot(ug,ug*1,col="white",xlab=variable,ylab="Taux de procuration",
+ ylim=c(0,.1))
> for(i in 1:(length(b$breaks)-1)){
+ polygon(b$breaks[i+c(0,0,1,1)],c(0,b$counts[i],b$counts[i],0)
+ /max(b$counts)*.05,col="light yellow",border=NA)}
> polygon(c(u,rev(u)),c(pt1$fit+2*pt1$se.fit,rev(pt1$fit-2*pt1$se.fit)),
+ border=NA,density=30,col=CL[4])
> lines(u,pt1$fit,col=CL[6],lwd=2)
> polygon(c(u,rev(u)),c(pt2$fit+2*pt2$se.fit,rev(pt2$fit-2*pt2$se.fit)),
+ border=NA,density=30,col=CL[3])
> lines(u,pt2$fit,col=CL[1],lwd=2)
> regt1l=lm(PROCURATIONS/INSCRITS~taux,weights=INSCRITS,data=baset1)
> regt2l=lm(PROCURATIONS/INSCRITS~taux,weights=INSCRITS,data=baset2)
> ND=data.frame(taux=ug)
> pt1l=predict(regt1l,newdata=ND,se=TRUE,type="response")
> pt2l=predict(regt2l,newdata=ND,se=TRUE,type="response")
> lines(ug,pt1l$fit,col=CL[5],lty=2)
> lines(ug,pt2l$fit,col=CL[2],lty=2)
> legend(0,.1,c("Second Tour","Premier Tour"),col=CL[c(1,6)],
+ lwd=2,lty=1,border=NA)

(this time I put everything in one block, the smoothed and the linear models, one after the other)

This time, we get a more classical regression output,

> summary(regt1l)

Call:
lm(formula = PROCURATIONS/INSCRITS ~ taux, data = baset1, weights = INSCRITS)

Weighted Residuals:
    Min      1Q  Median      3Q     Max 
-1.9994 -0.2926  0.0011  0.3173  3.2072 

Coefficients:
            Estimate Std. Error t value Pr(>|t|)    
(Intercept) 0.021268   0.001739   12.23   <2e-16 ***
taux        0.054371   0.004812   11.30   <2e-16 ***
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Residual standard error: 0.646 on 835 degrees of freedom
Multiple R-squared:  0.1326,	Adjusted R-squared:  0.1316 
F-statistic: 127.7 on 1 and 835 DF,  p-value: < 2.2e-16

> summary(regt2l)

Call:
lm(formula = PROCURATIONS/INSCRITS ~ taux, data = baset2, weights = INSCRITS)

Weighted Residuals:
    Min      1Q  Median      3Q     Max 
-2.9029 -0.4148 -0.0338  0.4029  3.4907 

Coefficients:
            Estimate Std. Error t value Pr(>|t|)    
(Intercept) 0.033909   0.001866   18.17   <2e-16 ***
taux        0.079749   0.005165   15.44   <2e-16 ***
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Residual standard error: 0.6934 on 835 degrees of freedom
Multiple R-squared:  0.2221,	Adjusted R-squared:  0.2212 
F-statistic: 238.4 on 1 and 835 DF,  p-value: < 2.2e-16

Several things can be noted from these graphs. (i) The two types of regression give very, very similar models for the proxy-voting rates, so we might as well take the one that is simpler to interpret. (ii) The smoothing adds nothing, and the linear model seems relevant. We thus looked at several variables, kept a certain number of them, and tried a multiple model. Before talking about the residuals of our model, I should perhaps spend a few lines on cartography (once again, Baptiste introduced me to some nice functions).

  • Visualising the polling stations in Paris

To get the map background, with the polling stations, we use the following (here again, I refer to Baptiste’s blog, which explains how the cartelec.net data are used)

> library(maptools)
> library(rgdal)
> library(classInt)
> paris=readShapeSpatial("paris-cartelec.shp")

If we want to visualise, for instance, the proxy-voting rate (say, averaged over the two rounds), we use the following data

> elec=data.frame()
> elec=cbind(bt1$BV,(bt1$PROCURATIONS+bt2$PROCURATIONS),(bt1$EXPRIMES+bt2$EXPRIMES))
> colnames(elec)=c("BV","PROCURATIONS","EXPRIMES")
> elec=as.data.frame(elec)
> elec$BV=bt1$BV

Then come the graphical functions, where we go from a rate to a class, and from a class to a colour,

> m=match(paris$BUREAU,elec$BV)
> plotvar=100*elec$PROCURATIONS/elec$EXPRIMES
> nclr=7
> plotclr=brewer.pal(nclr,"RdYlBu")[nclr:1] 
> class=classIntervals(plotvar[m], nclr, style="fisher",dataPrecision=1)
> colcode=findColours(class, plotclr)

It only remains to conclude, with a graphical visualization of our data

> par(mar=c(1,1,1,1))
> plot(paris,col=colcode,border=colcode)
> legend(656274.9, 6867308,legend=names(attr(colcode,"table")), 
+ fill=attr(colcode, "palette"), cex=1, bty="n",
+ title="Frequence procurations (%)")

To wrap up, we can have a look at the residuals, obtained from a linear model. Consider a model with only three explanatory variables,

> regt1=lm(PROCURATIONS/INSCRITS~I(POP65P/POP)+
+ I(PROPRIO/LOGEMENT)+I(CS3/POP1564),weights=INSCRITS,data=baset1)

In that case, the visualization of the residuals gives

> m=match(paris$BUREAU,elec$BV)
> plotvar=100*residuals(regt1)
> nclr=7
> plotclr=brewer.pal(nclr,"RdYlBu")[nclr:1] 
> class=classIntervals(plotvar[m], nclr, style="fisher",dataPrecision=1)
> colcode=findColours(class, plotclr)
> par(mar=c(1,1,1,1))
> plot(paris,col=colcode,border=colcode)
> legend(656274.9, 6867308,legend=names(attr(colcode,"table")), 
+ fill=attr(colcode, "palette"), cex=1, bty="n",title="Residus")

Ideally, we would like to see nice (spatial) noise, that is, colors distributed randomly over Paris. There are still quite a few areas whose neighbors share the same color, and we can spot a few atypical neighborhoods, with either strongly negative or strongly positive residuals. As always in modeling, one could spend hours trying to capture every effect, but no model based on the variables at our disposal allowed us to do substantially better.

The Law of Small Numbers

As we saw in Charpentier (2010), the law of large numbers is often invoked to justify the pooling of independent risks: the larger the pool, the smaller the variability. Provided the risks are not too large. Catastrophic risks are a problematic case: they are rare, and sometimes so (potentially) costly that the very existence of the variance must be questioned. The modeling of these rare events relies on the law of small numbers, to borrow the title of Ladislaus Bortkiewicz's book. As we will see, the Poisson distribution is to the law of small numbers what the normal distribution is to the law of large numbers. We will therefore look in detail at the importance of the Poisson distribution for modeling rare events. And we will see why the probability that an event does not occur is always 37%. Or almost.

  • Siméon-Denis Poisson's distribution

Siméon-Denis Poisson worked on probability theory for almost twenty years, from 1820 to 1840. Like many of his contemporaries, his first works dealt with gambling problems, with a communication to the Académie des Sciences on the banker's advantage in the game of trente et quarante. But his first substantial work dealt with a problem studied at length by Laplace (who had been one of his professors), the proportion of births of girls and boys (a classical statistical problem), published in 1830. In it, he presents in particular a proof of the law of large numbers for a Bernoulli distribution, which he would later modify. In this memoir, he presents a distribution that would later be called the Poisson distribution: "the probability that an event whose chance at each trial is the very small fraction x/mu does not occur more than n times in a very large number mu of trials" (to use the terminology of 1837; in more modern terms, this is the cumulative distribution function) is

P=\left(1+x+\frac{x^2}{1\cdot 2}+\frac{x^3}{1\cdot 2\cdot 3}+\cdots+\frac{x^n}{1\cdot 2\cdots n}\right)e^{-x }

This distribution reappears in his famous treatise, published in 1837, Recherches sur la probabilité des jugements. In particular, in chapter 8, he obtains his distribution as the limit of the binomial distribution B(T,\lambda /T) as T becomes large, but quickly moves on to other considerations. He does not study this limiting distribution, and does not really propose to use it. One had to wait for the work of Ladislaus Bortkiewicz, almost a century later, to see applications, in Das Gesetz der kleinen Zahlen (the law of small numbers). And, as always in mathematics, giving him all the credit is a bit excessive, since a century earlier, in 1718, de Moivre had obtained the very same distribution, also as a limit of the binomial. If Poisson's name went down to posterity, it is essentially because Boltzmann cited him in 1868, as did Seidel in 1876, and above all Tchebychev. That said, Poisson was far from unknown as a scientist. His work led him to electrostatics (the famous Poisson equation) and to the heat equation with Fourier, he clashed with Fresnel on optics, and he presided over the Académie des Sciences twice.

The distribution of a count of events obtained as a sum of Bernoulli variables, in a large population, can be approximated by a Poisson distribution. Or, put differently, when the probability of occurrence is small relative to the sample size. Formally, if N follows a binomial distribution B(n,p), with p\sim\lambda/n, then

P[N=k]=\binom{n}{k}p^k(1-p)^{n-k}\sim e^{-\lambda}\frac{\lambda^k}{k!}

We speak of a law of small numbers because we are counting rare events here (the probability of occurrence of an event being inversely proportional to n).
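
As a quick numerical check (a sketch, not from the original post), the binomial probabilities with n = 100 and p = λ/n are already very close to the Poisson ones:

# Poisson approximation to the binomial: B(n, lambda/n) versus Poisson(lambda)
n <- 100; lambda <- 1
rbind(binomial = dbinom(0:5, size = n, prob = lambda/n),
      poisson  = dpois(0:5, lambda))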

This result translates as follows: if we take a 10\times 10 chessboard, toss 100 coins onto it and count the number of coins per square, the distribution of the counts will follow a Poisson distribution with mean 1 (the ratio of the number of coins to the number of squares). An illustration is given in the table below

Number of coins per square   Frequency   Poisson distribution
0                                   36       36.78
1                                   39       36.78
2                                   16       18.39
3                                    7        6.13
4                                    2        1.53
5 and more                           0        0.37
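
The chessboard experiment itself is easy to simulate; a minimal sketch (with an arbitrary seed) whose output can be compared with the table above:

# Toss 100 coins uniformly on a 10x10 grid, count coins per square,
# and compare with a Poisson distribution with mean 1
set.seed(1)
squares <- sample(1:100, size = 100, replace = TRUE)   # square hit by each coin
counts  <- tabulate(squares, nbins = 100)              # number of coins per square
table(counts)                                          # observed frequencies
round(100 * dpois(0:5, lambda = 1), 2)                 # Poisson(1) counts, out of 100 squares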

  • The use of the Poisson distribution, and Poisson mixtures

At the end of the nineteenth century, at the University of Göttingen, Wilhelm Lexis (known in demography for the diagrams that now bear his name) wanted to study applications of this distribution. While he was studying the use of the Poisson distribution in a demographic context, one of his students, Ladislaus Bortkiewicz, proposed using it to model numbers of accidents. In Bortkiewicz's famous example, from 1898, he studied the number of cavalrymen killed by horse kicks, between 1875 and 1894, in 10 corps (i.e. 200 corps-years). He obtained the distribution given in the table below. The right-hand column gives the counts predicted by a Poisson distribution with the same mean. The fit is remarkably good, and one quickly understands Bortkiewicz's fascination with this distribution.

Number of deaths, per corps   Frequency   Poisson distribution
0                                  109      108.67
1                                   65       66.21
2                                   22       20.22
3                                    3        4.11
4                                    1        0.63
5 and more                           0        0.08
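
The right-hand column of the table above can be recomputed directly; a short sketch (the input is just the observed frequencies of the table):

# Bortkiewicz's horse-kick data: fit a Poisson with the same mean and
# compare the fitted counts with the observed frequencies above
deaths   <- 0:4
freq     <- c(109, 65, 22, 3, 1)
lambda   <- sum(deaths * freq) / sum(freq)    # empirical mean, here 0.61
expected <- sum(freq) * dpois(0:5, lambda)    # fitted counts for 0, ..., 5 deaths
round(expected, 2)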

Now, to be perfectly honest, the law we stated above as a law of small numbers is very different from the one proposed by Bortkiewicz (the law he stated in his book earned him a scathing comment from Corrado Gini, who wrote provocatively in 1907 that "the law of small numbers does not exist"). But the use of the Poisson distribution to model accidents, and more generally small counts, had just begun.

In the 1930s, Filip Lundberg had noted the (theoretical) interest of the Poisson process for modeling the arrival of claims, as had several actuaries of the Scandinavian school (Esscher in 1932, Segerdahl in 1939, Lüders in 1934, etc.). Since then, actuaries have used this distribution to model all sorts of "rare" events, such as the annual number of hurricanes in the United States (table below).

Number of hurricanes   Frequency   Poisson distribution
0                            30       27.16
1                            48       47.99
2                            37       42.41
3                            29       24.98
4                             8       11.03
5                             3        3.90
6                             3        1.15
7                             1        0.29
8 and more                    0        0.08

  • From the Poisson distribution to the return period

A fundamental concept in the management of extreme risks was introduced by Emil Gumbel in 1958, linking the time (in years) between two consecutive events and the (annual) probability of occurrence. For events occurring with annual probability 1/T, independently of one another, the average waiting time between two events is T, called the return period, and the probability that no event occurs during n (consecutive) years is then

\mathbb{P}\left(N>n\right)=\left(1-\frac{1}{T}\right)^n

This can be summarized in the table below, giving the probability that no event occurs during n years (rows) as a function of the return period T (columns). For a hundred-year event, there is a 36.60% chance that it does not occur within a hundred years.

Number of years           Return period T (in years)
without catastrophe       10        20        50        100       200
10                        34.86%    59.87%    81.71%    90.43%    95.11%
20                        12.15%    35.84%    66.76%    81.79%    90.46%
50                         0.51%     7.69%    36.41%    60.50%    77.83%
100                        0.00%     0.59%    13.26%    36.60%    60.57%
200                        0.00%     0.00%     1.75%    13.39%    36.69%
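
The table above can be reproduced in one line from the formula \mathbb{P}(N>n)=(1-1/T)^n; a small sketch:

# Probability of no event during n years, for an event with return period T
n_years <- c(10, 20, 50, 100, 200)   # years without a catastrophe
T_ret   <- c(10, 20, 50, 100, 200)   # return periods
P <- outer(n_years, T_ret, function(n, T) (1 - 1/T)^n)
dimnames(P) <- list(paste(n_years, "years"), paste("T =", T_ret))
round(100 * P, 2)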

  • The probability that an event does not occur is... 37%

As can be seen on the diagonal of the table above, the probability that an event does not occur during T years, when its return period is T, is of the order of 37%: indeed, (1-1/T)^T tends to e^{-1}\approx 36.8\% as T grows. Incidentally, going back for a moment to the 10\times 10 chessboard mentioned earlier, the probability of having no coin on a given square was also... 37%.

Let us revisit an example that caused some controversy a few years ago, about nuclear risk. In an article (Laponche and Dessus, 2011), we learned that the probability of a major incident on a nuclear reactor, in a given year, was of the order of 0.0003 (3 chances in 10,000). Since there are 143 nuclear reactors, over 30 years the probability of a major incident is (according to the authors' calculation)

\underbrace{143\times30}_n\times\underbrace{0.0003}_p\sim 129\%

Hence the delightful conclusion: "the probability of occurrence of a major accident on these reactor fleets would therefore be [...] more than 100% for the European Union". As noted, the probability of a major incident over n (reactor-)years, when the annual probability is p, can be written

P[N\leq n]=1-(1-p)^n \sim np

using a first-order expansion that is not justified here! The Poisson model should be used instead. The probability of at least one major incident is then

P[N\leq n]=1-(1-p)^n \sim 1-e^{-np}

which here is 72.47%. If the probability is translated into a duration,

p=\frac{1}{T}

with

T=\frac{1}{0.0003\times143}\sim 23.31

we see that the average waiting time between two major incidents in Europe is 23 years. So that, over 23 years, the probability of having no major incident is of the order of 37%.
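
For completeness, the different calculations of this section can be compared in a few lines (a sketch using the figures quoted above):

# Naive calculation versus the Poisson-type correction, for the nuclear-risk
# example: p = 0.0003 per reactor-year, 143 reactors, 30 years
p <- 0.0003
n <- 143 * 30
c(naive   = n * p,              # the (meaningless) 129% of the article
  exact   = 1 - (1 - p)^n,      # probability of at least one incident
  poisson = 1 - exp(-n * p))    # Poisson approximation, about 72%
1 / (0.0003 * 143)              # return period, about 23 years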

  • Conclusion

The Poisson distribution is everywhere in insurance, as it is the central distribution for modeling counts of rare events. The number of deaths in a life-insurance portfolio follows a Poisson distribution, as does the number of claims per unit of time in motor insurance. One even recovers the Chain Ladder estimator of claims reserves when running a Poisson regression on the incremental payments.

Personal Analytics with RSS Feeds

I am currently working on a paper on academic blogging, based on my own experience. And I wanted to do something similar to Stephen Wolfram's personal analytics, for my own life. More specifically, I wanted to understand when I post my blog entries. If I post more entries during office hours, then it should mean that, indeed, I consider my blog as part of my job (which is something I actually believe). On the other hand, if I post more in the evening, or in the middle of the night, then it could mean that my blog is clearly just for fun, and somehow outside the official academic schedule.

With the help of @3wen, we have here a function that can read rss feeds, and extract the publication date (and other pieces of information actually),

> library(XML)
> library(dplyr)
> baseRSS <- function(adresse){
+   doc <- try(xmlTreeParse(adresse))
+   if(length(doc)>1){
+   lesArticles <- xpathApply(r <- xmlRoot(doc), "//item") 
+   infosUneEntree <- function(x){
+   title <- sapply(xpathApply(x, "//title"), xmlValue)
+   links <- sapply(xpathApply(x, "//link"), xmlValue)
+   pubDate <- sapply(xpathApply(x, "//pubDate"), xmlValue)
+ return(cbind(title = title, links = links, pubDate = pubDate))
+ }
+ df <- lapply(lesArticles, infosUneEntree)
+ df <- data.frame(do.call("rbind", df))
+ return(df)
+ }
+ else{return(NA)}
+ }

The trick is that the page containing the RSS feed is truncated: you get only 30 posts (the latest ones). With WordPress, you can easily go further (thanks @3wen) using

> df.freak2 <- baseRSS("http://freakonometrics.hypotheses.org/feed?paged=2")
Namespace prefix dc on creator is not defined
Namespace prefix content on encoded is not defined
Namespace prefix wfw on commentRss is not defined
Namespace prefix slash on comments is not defined
> head(df.freak2)
                                         title
1       S\303\251ries chronologiques, syllabus
2 Copules et valeurs extr\303\252mes, syllabus
3         Jimmy, Mile End, et le Qu\303\251bec
4                Multivariate Archimax copulas
5                     Somewhere else, part 107
6     Informatique (sans ordinateur), partie 1
                                        links                         pubDate
1 http://freakonometrics.hypotheses.org/11593 Mon, 06 Jan 2014 00:31:52 +0000
2 http://freakonometrics.hypotheses.org/11595 Mon, 06 Jan 2014 00:31:21 +0000
3 http://freakonometrics.hypotheses.org/11362 Sun, 05 Jan 2014 03:33:31 +0000
4  http://freakonometrics.hypotheses.org/7673 Sat, 04 Jan 2014 11:01:05 +0000
5 http://freakonometrics.hypotheses.org/11584 Fri, 03 Jan 2014 15:34:29 +0000
6 http://freakonometrics.hypotheses.org/11138 Fri, 03 Jan 2014 07:15:03 +0000

and if we try to get a page that does not exist, we get the following error

> df.freakFaux <- baseRSS("http://freakonometrics.hypotheses.org/feed?paged=2000")
failed to load HTTP resource
Error : 1: failed to load HTTP resource

(unfortunately, I could not do it with https://feeds.feedburner.com/ for instance). With the following code, we can extract information about all the posts online on my blog

> df.freak <- NULL
> for(i in 1:2000){
+   df.tmp <- baseRSS(paste("http://freakonometrics.hypotheses.org/feed?paged=", i, sep = ""))
+   if(length(df.tmp)>1){
+     df.freak <- rbind(df.freak, df.tmp)
+   }else{ break }
+ }

All that is just fine. Now, let us write a small function to convert the date into some format I can use (here, I want to study the hour, as well as the week day).

> LD=c("Mon","Tue","Wed","Thu","Fri","Sat","Sun")
> datahour=function(txt){
+ wd=substr(as.character(txt),1,3)
+ wdy=which(LD==wd)
+ y=substr(as.character(txt),13,16)
+ h=substr(as.character(txt),18,19)
+ mn=substr(as.character(txt),21,22)
+ T=as.numeric(h)+as.numeric(mn)/60
+ return(data.frame(weekday=wdy,time=T,year=as.numeric(y)))}
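
As a side note, the same parsing could presumably be done with strptime, since the pubDate field follows the RFC 822 date format; here is a minimal sketch (the format string, and the English locale it requires, are assumptions about the feed):

# A possible alternative to the substr-based parsing above
# (assuming pubDate strings like "Mon, 06 Jan 2014 00:31:52 +0000")
parse_pubdate <- function(txt){
  # %a/%b assume English day/month names, e.g. after Sys.setlocale("LC_TIME", "C")
  d <- strptime(txt, format = "%a, %d %b %Y %H:%M:%S", tz = "GMT")
  data.frame(weekday = d$wday,               # 0 = Sunday, ..., 6 = Saturday (differs from LD above)
             time    = d$hour + d$min/60,
             year    = d$year + 1900)
}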

> datarss=function(df){
+ L=unlist(lapply(as.character(df$pubDate),datahour))
+ db=data.frame(
+ D=L[names(L)=="weekday"],
+ T=L[names(L)=="time"],
+ Y=L[names(L)=="year"])
+ return(db)}

Here, I extract the week day and the time of day (continuous, from 0 to 24, excluded). With the following function, we can see the proportion of posts per week day,

> hc=rev(heat.colors(100))
> weekday=function(db,yearinf=FALSE){
+ y=unique(db$Y)
+ if(yearinf==TRUE) y=y[-which.max(y)]
+ if(yearinf==FALSE) y=y[-c(which.max(y),which.min(y))]
+ L=NULL
+ for(i in y){
+ sB=subset(db,db$Y==i)
+ L=rbind(L,table(sB$D)/nrow(sB)*100)}
+ barplot(t(L[nrow(L):1,]),names=rev(y),col=hc[c(rep(15,5),rep(70,2))])
+ }

(from the bottom to the top, Monday till Friday in light yellow, and Saturday and Sunday in light red). Here, on my own blog, it would be

> weekday(datarss(df.freak))

For the hour, it was slightly more technical (I could not find a decent and simple way to produce the graph I was looking for, so I did it myself)

> hour=function(db,yearinf=FALSE){
+ y=unique(db$Y)
+ if(yearinf==TRUE) y=y[-which.max(y)]
+ if(yearinf==FALSE) y=y[-c(which.max(y),which.min(y))]
+ L=NULL
+ for(i in y){
+ sB=subset(db,db$Y==i)
+ if(i==2013) t=table(floor((sB$T+6)%%24))/nrow(sB)*100
+ if(i<2013)  t=table(floor(sB$T))/nrow(sB)*100
+ t=t[as.character(0:23)]
+ names(t)=as.character(0:23)
+ t[is.na(t)]=0
+ L=rbind(L,t)}
+ plot(y,rep(24,length(y)),ylim=c(-3,24),axes=FALSE,
+ xlim=c(min(y)-.5,max(y)+.5),xlab="",ylab="",col="white")
+ axis(2)
+ for(i in y){
+ text(i,-2,i)
+ for(j in 0:23){
+ polygon(c(i-.4,i-.4,i+.4,i+.4),
+ c(j,j+1,j+1,j),border=NA,col=hc[L[max(y)-i+1,j+1]/max(L)*98+1])
+ }}}

Just a short comment here. If you look at the code, there is a difference between 2013 and the years before. The reason is simple: in December 2012, I officially decided to migrate from my old blog to this new one. All the posts prior to December 2012 were initially published on the old blog, which was on Montréal (East Coast) time. And I have the feeling that my new blog runs on European time. So I shifted the times by 6 hours. But the problem might actually be more complicated.

> hour(datarss(df.freak))

Now, let us try to comment. On the week days, I find it a bit scary to see that I spend so much time during the weekends on my (supposedly) professional blog. As for the hour, I can explain 2013 easily. I usually spend most of my evenings working (on the blog, on my courses, or on my research). But I try to avoid posting an entry at 2 a.m., so I usually keep it until the morning; when I arrive at the office, I finalize the post and make it available.

To understand the difference with previous years, I should probably add a technical comment: the previous blog was on a Dotclear platform. On Dotclear, the publication time is not exactly the time the post officially went online; by default, it is rather the time the post was saved for the first time. So there might be some slight differences. I believe that, previously, I would start working on a post in the afternoon, then spend some time on it in the evening, or even the day after; but when I published it, if I did not change the default settings, the publication time would be the afternoon, when I first saved the post.

Let us try on another blog... The problem is that it is quite difficult to get old entries from RSS feeds. Except with WordPress... So I tried to run the previous code on http://economix.blogs.nytimes.com/. The extraction is simple here.


But here again, I do have trouble with 2013. To be more specific, when I look at the feeds I get

while I have on my side

 501 Tue, 14 May 2013 04:01:29 +0000
 502 Mon, 13 May 2013 19:51:06 +0000
 503 Mon, 13 May 2013 04:01:46 +0000
 504 Fri, 10 May 2013 21:11:58 +0000
 505 Fri, 10 May 2013 18:45:48 +0000
 506 Fri, 10 May 2013 17:33:55 +0000
 507 Fri, 10 May 2013 13:00:41 +0000
 508 Fri, 10 May 2013 04:01:53 +0000

I have here a 4-hour difference that I cannot explain. But it looks fine before 2013. If I use the previous code, with (in the loop)

+   df.tmp <- baseRSS(paste("http://economix.blogs.nytimes.com/feed/?paged=", i, sep = ""))

we can get, for instance, the following graph,

 

We do observe an interesting dynamic here: I guess that previously people were working during the day, and then posting at the end of the day. It looks like, now, people work during the day, sometimes late into the evening, but wait until the next morning to post the entry. Just as I do, in order to read it one last time, with a fresh mind... Anyway, I still have to understand what happened in 2013, just to make sure that the data I extract can be used...

Risk Measures with Extreme Value Models

We saw on Monday, in the MAT8595 course, how to use the Generalized Pareto Distribution to estimate some downside risk measures, given a sample (assumed to be i.i.d.; I will not discuss extremal properties of stochastic processes here) with distribution https://latex.codecogs.com/gif.latex?F. The cumulative distribution function of the Generalized Pareto distribution is here

G_{\xi,\sigma}(x)=1-\left(1+\frac{\xi\,x}{\sigma}\right)^{-1/\xi},\qquad x\geq 0

For some threshold u, and https://latex.codecogs.com/gif.latex?x\geq%20u, we can write

\mathbb{P}(X>x)=\mathbb{P}(X>u)\cdot\mathbb{P}(X-u>x-u\vert X>u)=\overline{F}(u)\cdot\overline{F}_u(x-u)

From the Pickands–Balkema–de Haan theorem, if u is large enough, then

\overline{F}_u(y)\approx\left(1+\frac{\xi\,y}{\sigma}\right)^{-1/\xi}

Given our sample https://latex.codecogs.com/gif.latex?\{x_1,\cdots,x_n\}, let N_u denote the number of observations exceeding the threshold u. Then we can write

\overline{F}(x)\approx\frac{N_u}{n}\left(1+\xi\,\frac{x-u}{\sigma}\right)^{-1/\xi}

or equivalently

F(x)\approx 1-\frac{N_u}{n}\left(1+\xi\,\frac{x-u}{\sigma}\right)^{-1/\xi}

If we invert this function, we get the quantile of level p,

Q(p)=u+\frac{\sigma}{\xi}\left(\left(\frac{n}{N_u}(1-p)\right)^{-\xi}-1\right)

Actually, instead of fixing a threshold and using the implied number of observations exceeding it, it is possible to fix the number of exceedances, and the associated threshold is then the corresponding order statistic.

The density of the Pareto distribution is here

https://latex.codecogs.com/gif.latex?%20%20%20%20%20g_{(\xi,\sigma)}(x)%20=%20\frac{1}{\sigma}\left(1%20+%20\frac{\xi%20x}{\sigma}\right)^{\left(-\frac{1}{\xi}%20-%201\right)}

which is here a function of two parameters, https://latex.codecogs.com/gif.latex?%20%20\xi and https://latex.codecogs.com/gif.latex?\sigma. As discussed in the course, it is possible to use the Delta method to derive the asymptotic distribution of any quantile, and then get an approximate (asymptotic) confidence interval.

But since https://latex.codecogs.com/gif.latex?\sigma is usually not a parameter of interest, why not consider a reparametrization of our density, as a function of https://latex.codecogs.com/gif.latex?%20%20\xi and https://latex.codecogs.com/gif.latex?Q(p) (for some probability https://latex.codecogs.com/gif.latex?p that will be considered as fixed from now on)? We can easily get (assuming that https://latex.codecogs.com/gif.latex?\xi\neq%200) that

https://latex.codecogs.com/gif.latex?g_{\xi,Q(p)}(x)=\frac{\displaystyle{\left(\frac{n}{N_u}(1-p)\right)^{-\xi}-1}}{\xi[Q(p)-u]}\left(1+\frac{\displaystyle{\left(\frac{n}{N_u}(1-p)\right)^{-\xi}-1}}{[Q(p)-u]}\cdot%20x\right)^{-\frac{1}{\xi}-1}

This expression is simple, and can be used to derive the likelihood (on the observations exceeding the threshold)

https://latex.codecogs.com/gif.latex?\log\mathcal{L}(\xi,Q(p);\boldsymbol{x})=\sum_{i=0}^{N_u-1}%20\log%20g_{\xi,Q(p)}(x_{n-i:n})

Numerically, let us write (and plot) that function. Consider some real data here

> library(evir)
> X=as.numeric(danish)
> Xs=sort(X,decreasing=TRUE)
> n=length(X)
> u=10
> nu=sum(X>u)

Consider, say, the 99.9% quantile,

> p=.999

The empirical quantile is here

> quantile(X,p)
   99.9% 
131.5519

The density and the loglikelihood functions are here

> gq=function(x,xi,q){
+ ( (n/nu*(1-p) ) ^ (-xi)-1)/(xi*(q-u))*
+ (1+((n/nu*(1-p))^(-xi)-1)/(q-u)*x)^(-1/xi-1)}

> loglik=function(param){
+ xi=param[2];q=param[1]
+ lg=function(i) log(gq(Xs[i],xi,q))
+ return(-sum(Vectorize(lg)(1:nu)))
+ }

We can try to plot this likelihood using

> h=201
> Q=seq(50,300,length=h)
> XI=seq(.1,1,length=h)
> XIQ=as.matrix(expand.grid(Q,XI))
> M=mapply(loglik,XIQ)

Unfortunately, it was not working, so I used the old style

> M=matrix(NA,h,h)
> for(i in 1:h){for(j in 1:h){M[i,j]=loglik(c(Q[i],XI[j]))}}

The level curves of the log-likelihood are here

> hc=heat.colors(100)
> image(Q,XI,-M,col=hc)
> contour(Q,XI,-M,add=TRUE)

Again, since our interest is in the quantile, we can draw the profile likelihood and get the maximum of that function

> PL=function(Q){
+ profilelikelihood=function(xi){
+ loglik(c(Q,xi))}
+ return(optim(par=.8,fn=profilelikelihood)$value)}
> (OPT=optimize(f=PL,interval=c(100,500)))

$minimum
[1] 111.1055

$objective
[1] 454.6481

and the graph is

> XQ=seq(50,300,length=101)
> L=Vectorize(PL)(XQ)
> plot(XQ,-L,type="l")
> up=OPT$objective
> abline(h=-up)
> abline(h=-up-qchisq(p=.95,df=1),col="red")
> I=which(-L>=-up-qchisq(p=.95,df=1))
> lines(XQ[I],rep(-up-qchisq(p=.95,df=1),length(I)),
+ lwd=5,col="red")
> abline(v=range(XQ[I]),lty=2,col="red")

which can be seen as an alternative to

> gpd.q(tailplot(gpd(X,u)),.999)
 Lower CI  Estimate  Upper CI 
 64.66184  94.28956 188.91752 
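
As a sanity check (a sketch, not part of the original session), the estimate returned by gpd.q should essentially be the closed-form quantile of the standard (\xi,\sigma) parametrization, obtained by plugging the maximum likelihood estimates into the expression for Q(p) given above:

# Closed-form quantile from the standard (xi, sigma) parametrization,
# using the maximum likelihood estimates returned by gpd() from evir
fit   <- gpd(X, threshold = u)
xi    <- fit$par.ests["xi"]
sigma <- fit$par.ests["beta"]
u + sigma/xi * ((n/nu * (1 - p))^(-xi) - 1)   # should be close to the 94.29 above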


If we want to focus on another downside risk measure, that shouldn't be too difficult. For instance, the expected shortfall ES(p)=\mathbb{E}[X\vert X>Q(p)] can be estimated as

ES(p)=Q(p)+e(Q(p))

where e(\cdot) denotes the mean excess function, which can be written, for a Generalized Pareto Distribution above the threshold u,

e(v)=\frac{\sigma+\xi\,(v-u)}{1-\xi},\qquad \xi<1

Thus, a natural estimator for the expected shortfall is

\widehat{ES}(p)=\widehat{Q}(p)+\frac{\widehat{\sigma}+\widehat{\xi}\,(\widehat{Q}(p)-u)}{1-\widehat{\xi}}

Once again, it is possible to re-parametrize the density of the Pareto distribution, using https://latex.codecogs.com/gif.latex?ES(p) instead of https://latex.codecogs.com/gif.latex?\sigma. Here, we get

https://latex.codecogs.com/gif.latex?g_{\xi,ES(p)}(x)=\frac{\displaystyle{\xi+\left(\frac{n}{N_u}(1-p)\right)^{-\xi}-1}}{\xi(1-\xi)[ES(p)-u]}\left(1+\frac{\displaystyle{\left(\frac{n}{N_u}(1-p)\right)^{-\xi}-1}}{(1-\xi)[ES(p)-u]}\cdot%20x\right)^{-\frac{1}{\xi}-1}

The code to get the associated log-likelihood is here

> ge=function(x,xi,es){
+ (xi+(n/nu*(1-p))^(-xi)-1)/(xi*(1-xi)*(es-u))*(1+(xi+(n/nu*(1-p))^(-xi)
+ -1)/((es-u)*(1-xi))*x)^(-1/xi-1)
+ }
> loglik=function(param){
+ xi=param[2];es=param[1]
+ lg=function(i) log(ge(Xs[i],xi,es))
+ return(-sum(Vectorize(lg)(1:nu)))
+ }

and again, we can plot it

and the profile (log) likelihood is here (for the 99.9% expected shortfall)

> PL=function(ES){
+ profilelikelihood=function(xi){
+ loglik(c(ES,xi))}
+ return(optim(par=.8,fn=profilelikelihood)$value)}
> (OPT=optimize(f=PL,interval=c(100,500)))
$minimum
[1] 143.66

$objective
[1] 454.6481

which could be compared with

> gpd.sfall(tailplot(gpd(X,u)),.999)
 Lower CI  Estimate  Upper CI 
 96.64625 191.36972 394.87555
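
Similarly (again a sketch, not part of the original session), the gpd.sfall estimate should essentially match the closed-form expected shortfall obtained from the maximum likelihood estimates:

# Closed-form expected shortfall from the standard (xi, sigma) parametrization
fit   <- gpd(X, threshold = u)
xi    <- fit$par.ests["xi"]
sigma <- fit$par.ests["beta"]
Qp    <- u + sigma/xi * ((n/nu * (1 - p))^(-xi) - 1)
Qp + (sigma + xi * (Qp - u)) / (1 - xi)       # should be close to the 191.37 above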

Adaptive Local Likelihood

Since the end of last week, Julien Tomas has been visiting UQAM for a few weeks, as a postdoctoral fellow. He will give a seminar this afternoon on adaptive local likelihood and its application to long-term care insurance.

We are interested in constructing the survival distribution of dependent individuals (long-term care claimants) with the same severity level (heavy dependency). In practice, actuaries often use methods relying heavily on expert opinion. We propose approaches that do not depend on expert judgment. Mortality is analyzed as a function of the age at onset of the pathology and the duration since onset. The mortality of claimants has a relatively complex structure. Rather than using parametric approaches or models with expert opinion, adaptive local likelihood methods allow us to extract the relevant information contained in the data by varying the smoothing parameters with the age at onset and the duration. We describe a pointwise local likelihood method using the intersection-of-confidence-intervals rule, and a global model with local adjustment factors for the observation window. The latter is an extension of the adaptive kernel method proposed by Gavin et al. (1995) to likelihood techniques. We vary the amount of smoothing according to location, and allow adjustments of the observation window according to the reliability of the data. Tests and summary statistics of the survival distributions are used to compare the graduated series obtained by adaptive local likelihood methods with p-spline models.

 

Bar des Sciences: Debate on Big Data

The Cœur des Sciences, at Université du Québec à Montréal, is organizing on February 13, at 6 pm, a public debate on Big Data, as part of a "bar des sciences", in which I should take part, with Vincent Gautrais (a.k.a. @gautrais), Yves-Alexandre de Montjoye (a.k.a. @yvesalexandre) and Jean-Hughes Roy (a.k.a. @jeanhuguesroy). Unkind observers will say that I could not refuse to speak in a bar (and they would probably not be wrong). That said, the public can no longer register: within hours of the announcement, the event was fully booked!

I will take the opportunity, today, to share a few thoughts... Comments are open, but the rest of the debate will take place at the Cœur des Sciences (I will try to post a summary on the blog). This is the first time I give a talk on the subject (at least in such explicit terms). I cannot help thinking of Dan Ariely's words on Big Data,

Having talked with many professionals in industry who handle data every day, I have seen them go through the five stages described by Zubin Dowalty,

  1. denial, “There’s nothing in that big data that we don’t already know.”
  2. anger, “There’s nothing in that big data that we don’t already know!” (with an exclamation mark this time)
  3. bargaining, “If we could just get the budget to expand the project…”
  4. depression, “These data sets are just overwhelming. There’s no way we can do this.”
  5. acceptance, “This isn’t going to happen overnight. We need to be strategic about how and when to undertake big data analysis.”

It has become a cliché to say that the use of computing is exploding, exponentially: in the number of users, in computing speed, in storage capacity, and in the volume of data collected over the internet.

If we agree that the topic deserves some thought, we need to say a bit more about what we are talking about, because if we proceed by free association, the term Big Data can evoke many things. As a statistician, it makes me think of fairly deep modeling problems, of high-frequency data (not to mention HFT), of the difference between causality and correlation, and of important issues in computational statistics (the difficulty of running algorithms on very, very large matrices, not to mention the more sophisticated mathematics that comes with them). As a data-analysis specialist, the term evokes two related notions, open data and data hacking. As an economist, I also think of the business built around data-analysis software, of Google's advertising space, of the suggestions Amazon makes whenever I order a book, of the use of online information (for instance Facebook friends) in credit scores, and so on. But the emphasis chosen here (given the second part of the title) is the Big Brother aspect. So an important part of the discussion will probably revolve around mobile applications, marketing, the internet, and the algorithms that seem to control us.

In short, we will be talking about data science. Sean Owen noted (again with some irony)

So one could believe that there is nothing new under the sun...?

  • Big Data through the eyes of a statistician

To borrow the words of Alan Mitchell, in big data, big dead end,

Big data is all about statistics: divining patterns and trends from large data sets. Statistics are incredibly powerful and useful for the way they challenge the assumptions and inferences naturally made by human minds – many of them faulty. As I said, that’s great.

Before getting to the heart of the matter, two small remarks to begin with. First, about the word Big. In mathematical finance, many pricing models have been developed in continuous time, with price processes (S_t)_{t\geq 0}. This is tricky to reconcile with financial econometrics, where prices are observed at discrete dates t_1<t_2<\cdots<t_n. In mathematical statistics and econometrics, most theorems are asymptotic, i.e. valid when the sample size n tends to infinity. So, traditionally, having Small Data was the concern, since the models seem to have been designed precisely under the assumption that a large number of observations would be available. Narinder Singh wrote last December

Big Data is misnamed in our (academic) world, because data sets have always been big. What is different is that we now have the technology to simply run every scenario.

But it is not that simple, far from it. The term Data, to begin with, has broadened. Data used to be industrial production levels, or commodity prices.

Classically, these data sets were presented in matrix form, a rectangle with https://latex.codecogs.com/gif.latex?n rows (the observations) and https://latex.codecogs.com/gif.latex?k columns (the variables). Historically, we had a large (not necessarily Big) number of rows, and somewhat fewer variables to explain them. A number of problems arose in genetics, when https://latex.codecogs.com/gif.latex?k started to grow faster than https://latex.codecogs.com/gif.latex?n. And the data started to become more complicated. Think of health-insurance data, with medical consultations, drug consumption (following a visit to the doctor), then a visit to a specialist, etc. Transcriptions of phone calls to a call center, or collections of books, possibly in several languages. Images, or videos. Or even three-dimensional medical imaging (plus time). Purchases by consumers on an online retail site (huge sparse matrices, since most entries are empty). In short, the word Data has become more complex than it used to be (we will come back to the mathematical tools needed to handle it).

1. Big Data: giving up the search for causes and settling for correlations

(which I already discussed briefly in a post following my reading of the books by Nate Silver, and by Kenneth Cukier and Viktor Mayer-Schönberger). To summarize quickly, Kenneth Cukier and Viktor Mayer-Schönberger noted, in mise en données du monde, le déluge numérique,

Such a use implies three major changes in our approach. The first consists in collecting and using as much information as possible, rather than making a selective sample as statisticians have done for more than a century. The second implies a certain tolerance for messiness: churning through countless data of uneven quality often proves more effective than exploiting a small, impeccably relevant sample. Finally, the third change implies that, in many cases, we will have to give up identifying causes and settle for correlations. Instead of trying to understand precisely why a machine no longer works, researchers can collect and analyze massive quantities of information about the event and everything associated with it, in order to spot regularities and establish under which circumstances the machine is likely to break down again. They can find an answer to the "how", not to the "why"; and, quite often, that is enough. [...] This change of approach to digital data, exhaustive rather than sampled, messy rather than methodical, explains the shift from causality to correlation. We are less interested in the deep reasons that govern the world than in the associations that may link disparate phenomena. The goal is no longer to understand things, but to obtain maximum efficiency.

This echoes what Chris Anderson wrote in 2008 in The End of Theory: The Data Deluge Makes the Scientific Method Obsolete,

In short, the more we learn about biology, the further we find ourselves from a model that can explain it. There is now a better way. Petabytes allow us to say: “Correlation is enough.” We can stop looking for models. We can analyze the data without hypotheses about what it might show. We can throw the numbers into the biggest computing clusters the world has ever seen and let statistical algorithms find patterns where science cannot.

This is a deep change of mindset. In 1963, Karl Popper (although traditionally seen as a philosopher critical of inductivism) asserted that it was pointless to collect data in the hope of bringing out similarities from which a new theory could then emerge:

the belief that we can start with pure observations alone, without anything in the nature of a theory, is absurd; as may be illustrated by the story of the man who dedicated his life to natural science, wrote down everything he could observe, and bequeathed his priceless collection of observations to the Royal Society to be used as inductive evidence. This story should show us that though beetles may profitably be collected, observations may not.

Nevertheless, Big Data does propose a paradigm shift, bearing on the scientific method perhaps more than on statistical modeling.

2. A small epistemological problem

Beyond the correlation-causality debate, statistics raises fundamental epistemological problems for philosophers of science. To take up Karl Popper's idea in The Logic of Scientific Discovery, quoted in Gilles (1971),

for although probability statements play such a vitally important role in empirical science, they turn out to be impervious to strict falsification

An example is given by Katja de Vries: the claim "all swans are white" was invalidated as soon as a black swan was observed for the first time. That is how science works. But if we take the following example (borrowed from another great philosopher),

we see that it is difficult ("impossible", Karl Popper would say) to invalidate a statistical hypothesis. To assert with certainty that the probability that a coin lands on "heads" is 50%, one would need an infinite number of tosses of that coin:

only an infinite sequence of events (…) could contradict a probability estimate

This is, by the way, the argument developed with many examples in Merchants of Doubt, by Naomi Oreskes and Erik Conway (I will come back to this book in the coming weeks). It is probably because of this that many forecasters (I am tempted to call them "preachers") are seen as charlatans. If I claim that there are 3 chances in 10,000 that a fire breaks out in the Cœur des Sciences building on February 13, it will be hard to validate or invalidate my estimate. Whatever happens, I will always be able to say "I told you so" (either that a fire was possible, or that it was extremely unlikely). We thus move from a world where an event would be "impossible" to one where it is "highly improbable". What a scientist would call "unlikely" corresponds to what Karl Popper would call "practically falsified",

It is fairly clear that this ‘practical falsification’ can be obtained only through a methodological decision to regard highly improbable events as ruled out – as prohibited. But with what right can they be so regarded? Where are we to draw the line? Where does this ‘high improbability’ begin?

In practice, the answer has been provided by the use of a p-value, a kind of "statistically significant falsification", to use Karl Popper's terms. We will come back to this point in a few paragraphs.

3. From parametric models to nonparametric statistics

In the end of theory: the data deluge makes the scientific method obsolete, Chris Anderson claimed that

Data without a model is just noise. But faced with massive data, this approach to science — hypothesize, model, test — is becoming obsolete.

This is again probably a bit more complex than it seems. The Cowles Commission, which laid the foundations of econometric theory (while founding the Econometric Society and the prestigious journal Econometrica), started from the premise that an econometric model should reflect an economic model. This is what we find in SEMs (Structural Equation Models) such as Klein's model, proposed in 1950,

Technically, we speak of a parametric model, since given the data, the only unknowns are a finite number of parameters. The transition to nonparametric models, in which the data prevail over any presumed model, did not happen painlessly. Leo Breiman analyzed it in statistical modeling: the two cultures, published in 2001.

To understand the distinction, we can go back to the classic example of the linear model. Consider 50 observations, pairs of variables (X_i,Y_i). In the linear model, we assume that

Y_i=\beta_0+\beta_1 X_i+\varepsilon_i

but we could also consider a quadratic model

Y_i=\beta_0+\beta_1 X_i+\beta_2 X_i^2+\varepsilon_i

(or any other functional relationship with a parametric form). If we assume that \varepsilon_i is an unpredictable noise, we can hope that this noise is centered, in the sense that, on average, it should be zero. To fit our model, we then try to minimize the sum of squared errors, i.e. we look for

(\widehat{\beta}_0,\widehat{\beta}_1)=\text{argmin}\left\{\sum_{i=1}^n\left(Y_i-\beta_0-\beta_1 X_i\right)^2\right\}

One can then show that this method yields (an estimate of) the average value of https://latex.codecogs.com/gif.latex?Y, for a given https://latex.codecogs.com/gif.latex?X,

\widehat{Y}(x)=\widehat{\beta}_0+\widehat{\beta}_1 x

But if we want to know the average of https://latex.codecogs.com/gif.latex?Y given https://latex.codecogs.com/gif.latex?X=x, why not just take an average, computed over the https://latex.codecogs.com/gif.latex?X_i close to the value x we are interested in? This is what we call a nonparametric estimator of the regression "line"

https://latex.codecogs.com/gif.latex?\widetilde{Y}(x)=\frac{\displaystyle{\sum_{i=1}^n%20Y_i\cdot%20\boldsymbol{1}(\vert%20X_i-x\vert\leq%20h)}}{\displaystyle{\sum_{i=1}^n%20\boldsymbol{1}(\vert%20X_i-x\vert\leq%20h)}}

Here we impose no model, and the prediction is dictated by the data.
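
To make the distinction concrete, here is a minimal sketch on simulated data (the 50 observations mentioned above are not reproduced here), comparing the least-squares fit with the moving-average estimator of the previous formula:

# Parametric fit (linear model) versus the naive nonparametric estimator:
# a local average of the Y_i whose X_i fall within a window of width 2h around x
set.seed(1)
X <- runif(50); Y <- sin(2*pi*X) + rnorm(50, sd = .3)   # simulated data
reg <- lm(Y ~ X)                                        # parametric model
h <- .1
m_hat <- function(x) sum(Y * (abs(X - x) <= h)) / sum(abs(X - x) <= h)
u <- seq(0, 1, by = .01)
plot(X, Y)
lines(u, predict(reg, newdata = data.frame(X = u)), lty = 2)  # linear fit
lines(u, Vectorize(m_hat)(u))                                 # local average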

To go further on the statistical tools, one can read Hal Varian.

4. The p-value in the Big Data era

In too big to fail: large samples and the p-value problem, Mingfeng Lin, Henry Lucas, Jr. and Galit Shmueli make the following point

A key issue with applying small-sample statistical inference to large samples is that even minuscule effects can become statistically significant. The increased power leads to a dangerous pitfall as well as to a huge opportunity. The issue is one that statisticians have long been aware of: “the p-value problem.” Chatfield (1995, p. 70) comments, ‘The question is not whether differences are ‘significant’ (they nearly always are in large samples), but whether they are interesting. Forget statistical significance, what is the practical significance of the results?’

On the use of the p-value, and its history, I refer to a few earlier posts. The issue of sample size is more technical; I will leave it for a post in the coming months...

5. Big Data as a substitute for algorithms that cannot be improved

One of the difficulties is modeling human decision-making. In purchasing behavior (we will come back to this a bit later), but also in strategy games. Games are simpler than real life, because their rules are clear and cannot be changed. Chess in particular. As Viktor Mayer-Schönberger and Kenneth Cukier note in Big data, a revolution that will transform how we live, work and think

For example, chess algorithms have changed only slightly in the past few decades, since the rules of chess are fully known and tightly constrained. The reason computer chess programs play far better today than in the past is in part that they are playing their endgame better. And they’re doing that simply because the systems have been fed more data. In fact, endgames when six or fewer pieces are left on the chessboard have been completely analyzed and all possible moves (N=all) have been represented in a massive table that when uncompressed fills more than a terabyte of data. This enables chess computers to play the endgame flawlessly. No human will ever be able to outplay the system.

This example is also taken up by Nate Silver in the Signal and the Noise,

Chess might be thought of as analogous to prediction. The players must process information—the position of the thirty-two pieces on the board and their possible moves. They use this information to devise strategies to place their opponent in checkmate. These strategies in essence represent different hypotheses about how to win the game. Whoever succeeds in that task had the better hypothesis. Chess is deterministic—there is no real element of luck involved. But the same is theoretically true of the weather (…).Our knowledge of both systems is subject to considerable imperfections. In weather, much of the problem is that our knowledge of the initial conditions is incomplete. Even though we have a very good idea of the rules by which the weather system behaves, we have incomplete information about the position of all the molecules that form clouds and rainstorms and hurricanes. Hence, the best we can do is to make probabilistic forecasts. In chess, we have both complete knowledge of the governing rules and perfect information—there are a finite number of chess pieces, and they’re right there in plain sight. But the game is still very difficult for us. Chess speaks to the constraints on our information-processing capabilities—and it might tell us something about the best strategies for making decisions despite them. The need for prediction arises not necessarily because the world itself is uncertain, but because understanding it fully is beyond our capacity. Both computer programs and human chess masters therefore rely on making simplifications to forecast the outcome of the game. We can think of these simplifications as “models,” but heuristics is the preferred term in the study of computer programming and human decision making. It comes from the same Greek root word from which we derive eureka.10 A heuristic approach to problem solving consists of employing rules of thumb when a deterministic solution to a problem is beyond our practical capacities

And this chess example carries over to machine translation, among other things.

The degree to which more data trumps better algorithms has been powerfully demonstrated in the area of natural language processing: the way computers learn how to parse words as we use them in everyday speech. Around 2000, Microsoft researchers Michele Banko and Eric Brill were looking for a method to improve the grammar checker that is part of the company’s Word program. They weren’t sure whether it would be more useful to put their effort into improving existing algorithms, finding new techniques, or adding more sophisticated features. Before going down any of these paths, they decided to see what happened when they fed a lot more data into the existing methods. Most machine-learning algorithms relied on corpuses of text that totaled a million words or less. Banko and Brill took four common algorithms and fed in up to three orders of magnitude more data: 10 million words, then 100 million, and finally a billion words. The results were astounding. As more data went in, the performance of all four types of algorithms improved dramatically. In fact, a simple algorithm that was the worst performer with half a million words performed better than the others when it crunched a billion words. Its accuracy rate went from 75 percent to above 95 percent. Inversely, the algorithm that worked best with a little data performed the least well with larger amounts, though like the others it improved a lot, going from around 86 percent to about 94 percent accuracy. “These results suggest that we may want to reconsider the tradeoff between spending time and money on algorithm development versus spending it on corpus development,” Banko and Brill wrote in one of their research papers on the topic.

So, on http://translate.google.com/, one can get a translation from Portuguese to French without the computer "knowing" either language. It could just as well translate into Elvish or Klingon, as Chris Anderson suggested in the end of theory: the data deluge makes the scientific method obsolete.

6. The mathematical tools of Big Data

A classic problem, seen as intractable because it leads to computations impossible to carry out in a reasonable time (say, before the end of the solar system in a few billion years), is the traveling salesman problem. In 1962, Procter & Gamble offered a $10,000 prize to whoever would find the shortest route for Toody and Muldoon, the drivers of car 54 in a popular TV series of the time, to visit 33 cities in the United States.

As William Cook computes in In Pursuit of the Traveling Salesman, with 33 cities there would be

131,565,418,466,846,765,083,609,006,080,000,000

possible routes whose lengths would have to be computed. And with the most powerful computer of 2009 (performing 1.5 million billion operations per second), it would take 28,000 billion years to complete the computation. One of the first steps towards a solution was proposed by Euler, in his problem of the seven bridges of Königsberg, in solutio problematis ad geometriam situs pertinentis.
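
As a quick check (a sketch, with the convention that the starting city is fixed and a route and its reverse are counted once), the figure quoted above is 32!/2:

# Number of distinct tours through 33 cities: 32!/2
factorial(32) / 2    # about 1.3157e+35, the figure quoted above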

This graph-based approach gave rise to the notion of Topological Data Analysis (three volumes on the subject have recently been published by Springer).

Topological data analysis is a way of getting structured data out of unstructured data so that machine-learning algorithms can act more directly on it

Among the techniques used, we find sampling methods. In 2007, Jim Gray (Turing Award winner) asserted (an idea taken up in The Fourth Paradigm, published in 2009)

The world of science has changed, and there is no question about this. The new model is for the data to be captured by instruments or generated by simulations before being processed by software and for the resulting information or knowledge to be stored in computers. Scientists only get to look at their data fairly late in this pipeline. The techniques and technologies for such data-intensive science are so different that it is worth distinguishing data-intensive science from computational science as a new, fourth paradigm for scientific exploration

On the visualization of networks, one can read cartographier les réseaux by Martin Grandjean, which presents a case study of the networks between members of parliament and lobbying activities in Switzerland,

with, as a bonus, an elegant visualization

Computers have transformed mathematics; no one will question that anymore. Mathematicians long looked for analytical solutions to the equations they obtained; and when an elegant closed form is known to be out of reach, we have been content, for several decades now, with numerical solutions (one can read science in the age of computer simulation by Eric Winsberg). In computational complexity: a modern approach, Sanjeev Arora and Boaz Barak give an almost accessible presentation of the concept of interactive proofs (in chapter 8), introduced in 1985 by Goldwasser, Micali and Rackoff on the one hand, and Babai on the other.

As an example for a probabilistic interactive proof system, consider the following scenario: Marla claims to Arthur that she can distinguish between the taste of Coke (Coca-Cola) and Pepsi. To verify this statement, Marla and Arthur repeat the following experiment 50 times: Marla turns her back to Arthur, as he places Coke in one unmarked cup and Pepsi in another, choosing randomly whether Coke will be in the cup on the left or on the right. Then Marla tastes both cups and states which one contained which drinks. While, regardless of her tasting abilities, Marla can answer correctly with probability 1/2 by a random guess, if she manages to answer correctly for all the 50 repetitions, Arthur can indeed be convinced that she can tell apart Pepsi and Coke.

In a sense, we recover the idea that with enough data, one can draw robust conclusions.

In a fascinating article (from 2000), entitled the curses and blessings of dimensionality, David Donoho reminds us that high dimension is not only a curse ("curse of dimensionality"), as Richard Bellman put it in 1957,

all [problems due to high dimension] may be subsumed under the header the curse of dimensionality. Since it is a curse (…) there is no need to feel discouraged about the possibility of obtaining significant results despite it

In fact, high dimension is not that simple, and the difficulties do not necessarily grow with the dimension. Consider for example the volume of the unit ball (radius 1) in dimension d. One can show that the volume is

V_d=\frac{\pi^{d/2}}{\Gamma\left(\frac{d}{2}+1\right)}

Looking at this function, going from dimension 3 to dimension 4 and then to dimension 5 does indeed make our ball bigger. But beyond that, surprisingly, the volume shrinks (it peaks around dimension 5, and then decreases towards 0)... Funny, isn't it?

Moreover, we are not just anywhere in that ball! In fact, we have very little chance of being near the center: one can show quite simply that if we draw points at random in the unit ball, they are very likely to lie close to the boundary. And the computation is simple: the probability of being at distance greater than 1-\varepsilon from the center (and thus really close to the boundary) is

\mathbb{P}(\Vert X\Vert>1-\varepsilon)=1-(1-\varepsilon)^d\rightarrow 1

as d\rightarrow\infty (whatever the value of \varepsilon>0). In short, large spaces are large, but they are mostly filled with emptiness!
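
Both phenomena are easy to check numerically; a minimal sketch:

# Volume of the unit ball in dimension d, and probability that a uniform
# point in the ball lies within distance .1 of the boundary
d <- 1:20
V <- pi^(d/2) / gamma(d/2 + 1)
round(V, 3)                        # peaks around d = 5, then goes to 0
round(1 - (1 - .1)^d, 3)           # P(||X|| > 0.9), close to 1 for large d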

More than five years ago, at the UseR! conference in Rennes, Trevor Hastie gave a brilliant talk that opened my eyes to problems I was unaware of: very large matrices are often very sparse, with the example of prediction for Netflix or Amazon (the latter example is taken up in big data, a revolution that will transform how we live, work and think).

In the 1960s, John Tukey fought for data analysis to become a recognized discipline, distinct from mathematical statistics. In 1962, he gave a talk at the annual meeting of the IMS (Institute of Mathematical Statistics) entitled the future of data analysis, which served as the basis for his book published 15 years later, Exploratory Data Analysis. In this talk, he left the world of mathematical proofs to explore data. As Howard Wainer would note,

he legitimized that, because he wasn’t doing it because he wasn’t good at math; he was doing it because it was the right thing to do

As John Tukey rightly noted in the technical tools of statistics (via http://cm.bell-labs.com/…)

Today, software and hardware together provide far more powerful factories than most statisticians realize, factories that many of today’s most able young people find exciting and worth learning about on their own. Their interest can help us greatly, if statistics starts to make much more nearly adequate use of the computer. However, if we fail to expand our uses, their interest in computers can cost us many of our best recruits, and set us back many years.

http://f.hypotheses.org/wp-content/blogs.dir/253/files/2013/02/102646212-05-04.jpeg

Because Big Data is resolutely something else (compared with what was traditionally done in statistics). First of all, simulations are built around a mathematical model: we face an immense space, but there is a model. Big Data, on the other hand, is more an exploration problem, like facing a virgin forest. In the first case, we keep progressing deductively, whereas Big Data forces us to proceed by induction. To use Cosma Shalizi's words in data mining,

Data mining, more stuffily ‘knowledge discovery in databases’, is the art of finding and extracting useful patterns in very large collections of data. It’s not quite the same as machine learning, because, while it certainly uses ML techniques, the aim is to directly guide action (praxis!), rather than to develop a technology and theory of induction. In some ways, in fact, it’s closer to what statistics calls ‘exploratory data analysis’, though with certain advantages and limitations that come from having really big data to explore.

7. “With great power comes great responsibility”

Everyone says it: Big Data gives power back to statisticians. To quote Mike Loukides' analysis in what is data science,

According to Mike Driscoll (@medriscoll), statistics is the ‘grammar of data science.’ It is crucial to ‘making data speak coherently.’ But it takes statistics to know whether this difference is significant, or just a random fluctuation.

This is what Nate Silver calls the Signal and the Noise (to borrow the title of his book).

Data science isn’t just about the existence of data, or making guesses about what that data might mean; it’s about testing hypotheses and making sure that the conclusions you’re drawing from the data are valid.

That said, telling a story (and therefore extracting the important information) from observations in (very) high dimension remains a delicate exercise. And often a subjective one.

8. Important applications of Big Data

There are thousands of concrete applications, in everyday life (or almost). One important example is imaging. It is a Big Data problem in the same sense as chess. For instance, to take a figure from Cireşan et al. (2013) on the detection of mitoses in breast cancer (using neural networks), one looks for atypical dark spots, with a frequency and a distribution found in sick patients but not in healthy ones.

As a statistician, Big Data is a fabulous open door. To quote the Venn diagram proposed by Vincent Granville,

This is neither more nor less than what Mike Loukides claimed in what is data science,

What differentiates data science from statistics is that data science is a holistic approach. We’re increasingly finding data in the wild, and data scientists are involved with gathering data, massaging it into a tractable form, making it tell its story, and presenting that story to others.

And I can only confirm! When, at a party, I used to say that I was a mathematician, or a statistician, I was invariably seen as a nerd (and it was probably not unfounded). Now that I am a data scientist, an expert in big data, I get invited by pretty girls to bars as a VIP (science bars, admittedly, but it's a start!).

  • Big Data through the eyes of an economist

When we think of Big Data, we also think of online retail, and of interactive applications (in any case, applications that must provide answers much faster than an in-depth statistical analysis carried out over several weeks, or even several months). To use the analysis of Dave Rich and Jeanne Harris in why predictive analytics is a game-changer, how companies use real-time data to plan for the future,

In simple terms analytics means using quantitative methods to derive insights from data, and then drawing on those insights to shape business decisions and, ultimately, improve business performance. Thus predictive analytics is emerging as a game-changer. Instead of looking backward to analyze ‘what happened?’ predictive analytics help executives answer ‘What’s next?’ and ‘What should we do about it?’

1. The financial stakes of Big Data

In February 2010, The Economist, in the article data, data everywhere, estimated that the sale of software specializing in "data management" and other "analytics" represented a colossal financial stake, with amounts of the order of 100 billion dollars and a growth rate of about 10% a year,

The business of information management—helping organisations to make sense of their proliferating data—is growing by leaps and bounds. In recent years Oracle, IBM, Microsoft and SAP between them have spent more than $15 billion on buying software firms specialising in data management and analytics. This industry is estimated to be worth more than $100 billion and growing at almost 10% a year, roughly twice as fast as the software business as a whole.

And the work of data engineers goes (in a way) beyond the traditional work of the statistician. The full title of Mike Loukides' article what is data science? (published in June 2010) was the future belongs to the companies and people that turn data into products. For a deeper analysis of the links between economics and Big Data, one can reread the data revolution and economic analysis by Liran Einav and Jonathan Levin, or seven big data trends for 2014 for more business-oriented aspects.

2. Marketing and advertising

When we think of Big Data, we also think of online ads, targeted according to my profile (the keywords I may have typed, and the sites I may have visited before). In pêcher le client dans une baignoire, Ariane Krol and Jacques Nantel write

Welcome to the new world of personalized marketing. A world that wants what is good for you… and will do anything to get it. In the early 1980s, the height of sophistication was to tailor one's strategies, through consumer surveys, to the targeted "segments": housewives over 50, professionals under 35 with incomes above 210,000 francs who play tennis at least twice a month, and so on. Practitioners were, in a way, fishing with drift nets, after their sonar had spotted a school of fish of the right species. Today, it is no longer the under-35s or any other segment they are interested in: it is you. No more nets, no more sonar: the fishing is done in a bathtub.

In 1998, the Google search engine merely suggested sites (like any search engine).

Today, Google goes much further. In 2013, out of the company's $14 billion of revenue (in the first quarter), 85% came from selling advertising space ($8.640 billion for Google AdWords, and $3.262 billion for Google AdSense). As a (theoretical) economist, this market raises fascinating problems of finding equilibria in a non-cooperative, repeated game in continuous time (on this topic, one can reread computer scientists optimize innovative ad auction, by Sara Robinson, or toward an integrated framework for automated development and optimization of online advertising campaigns, by Stamatina Thomaidou, Michaelis Vazirgiannis and Kyriakos Liakopoulos). But it also creates difficulties for the companies that want to buy advertising when a given keyword is typed. As Stephen Baker recalls in a short anecdote in the introduction of the Numerati, one can find interesting correlations by looking at the data, in particular when trying to understand when people rent cars (in order to slip in a banner ad at the right time): he mentions a correlation with plane tickets, funeral wreaths, and romantic movies.

‘What is it about romantic-movie lovers?’ Morgan asks, as we sit in his New York office on a darkening summer afternoon. The advertising entrepreneur is flush with details about our ramblings online. He can trace the patterns of our migrations, as if we were swallows or humpback whales, while we move from site to site. Recently he’s become intrigued by the people who click most often on an ad for car rentals. Among them, the largest group had paid a visit to online obituary listings. That makes sense, he says, over the patter of rain against the windows. ‘Someone dies, so you fly to the funeral and rent a car.’ But it’s the second-largest group that has Morgan scratching his head. Romantic-movie lovers. For some reason Morgan can’t fathom, loads of them seem drawn to a banner ad for Alamo Rent A Car. (…) I ask him about the correlation he told me about earlier, the one between romantic-movie fans and Alamo Rent A Car. It takes a moment for him to recall it. ‘Oh yeah. They were off the charts.’ Did his researchers, I ask, ever come up with an explanation for it. He nods. ‘It had to do with weekends.’ It was Alamo ads promoting ‘escapes’ that attracted the attention of these web surfers, he says. The romantic-movie fans booked leisure rentals, largely for weekend getaways. Perhaps they wanted to act out the kind of scenes that drew them to the cinema. Banners for weekday rentals apparently left them cold.

It is important to have access to large volumes of data in order to buy advertising space on the right keywords. But on top of that, the dynamic component must be taken into account: buying an advertising slot is only a first step. It can then be worth reappearing regularly afterwards, taking the forgetting curve into account (in the spirit of Hermann Ebbinghaus's work, for example).
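
As a purely illustrative sketch (assuming the classical exponential parametrization of the forgetting curve, retention = exp(-t/S), with an arbitrary memory-strength S and an arbitrary 50% re-exposure threshold, none of which come from the original post), one could decide when to show the banner again as follows.

forget=function(t,S=3) exp(-t/S)    # S is an assumed memory-strength parameter
t=seq(0,15,by=.1)
plot(t,forget(t),type="l",xlab="days since last exposure",
     ylab="retention",ylim=c(0,1))
abline(h=.5,lty=2)                  # naive rule: re-show the ad below 50% retention
t[min(which(forget(t)<.5))]         # about S*log(2) days after the last exposure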

One should not be surprised, then, to see ads for sunscreen keep reappearing on various sites one visits, after having asked an online retailer for the price of a Montréal-Miami ticket.

  • losing control: Big Data and Big Brother

Let us now come back to the second term in the title of this evening, Big Brother.

1. Statistics and data collection

Before starting, just a quick aside. As Alain Desrosières notes, to define the word statistics one traditionally goes back to Abriß der neuen Staatswissenschaft der vornehmen Europäischen Reiche und Republiken, published in 1749 by Gottfried Achenwall: in this book, the word Statistik (or Staatswissenschaft or Kameralwissenschaft) is often associated with demographic data, fertility rates, or numbers of conscripts. These two quantities, it should be noted, were associated with the power and wealth of a state, at least in the 18th century. In this sense, I would agree with Daniel Solove, who noted that the right comparison was perhaps not Orwell's 1984, but rather Kafka's The Trial (Orson Welles's film adaptation is, by the way, freely available on http://openculture.com/). In The Trial, there is no explicit surveillance, just a bureaucracy that stores data, all sorts of data. The hero feels dispossessed when he is accused, and powerless. Like when a withdrawal is refused at an ATM in Mexico, because an algorithm has decided that it was unlikely that on a Thursday, at 2:17 pm, you would be on a beach of the Riviera Maya.

As recalled at the very beginning, this purely descriptive statistics (one was content with collecting data) later evolved toward inferential statistics. As Maurice Kendall (in 1942) and Victor Hilts (in 1978) recall, the Royal Statistical Society, founded in 1834, had as its motto Aliis Exterendum, making it clear that it was not a statistician's job to interpret the data (one understands why, in 1857, the members of this eminent learned society changed it). This transition toward statistical inference (what some now call analytics) revolutionized science, as Ronald Nelson, Mats Pettersson and Örjan Carlborg note in a century after Fisher: time for a new paradigm in quantitative genetics. One can also reread Maurice Kendall, writing about the expansion of statistics,

They have already overrun every branch of science with a rapidity of conquest rivalled only by Attila, Mohammed, and the Colorado beetle

But my point here is that collecting data is not recent, and is far from being tied only to the development of computers and the internet. On the spectre of Big Brother, one can reread the article published in Newsweek in July 1970,

2. On the granularity of statistics

Statistik raises a deep problem of granularity: at what level of detail can data be accessed?

i) on the one hand, it is possible to access very fine-grained data, but through survey techniques (monthly rice consumption, time spent watching television, etc.), and therefore non-exhaustive ones;

ii) on the other hand, when exhaustive data are available, it is tricky to release them (think of financial wealth, and any other information known to the tax authorities). The classical answer is then to aggregate the data by geographic region or by age group.

A counter-example (rare enough to be worth mentioning) is perhaps the case of the American Census data (taken up by Bill Rankin), which made it possible to obtain income and racial information at a very, very fine level of granularity.

iii) in between, building on the data-liberation movement (open data), these bodies are now required to disclose information, but the data are often aggregated, in order to avoid any confidentiality issue (the legal texts are older, such as the Freedom of Information Act, dating back to 1966). But aggregation raises interpretation problems, as all the studies on the notion of ecological fallacy remind us.

3. Individual data

A lot can be told from individual data. Last October, in I challenged hackers to investigate me and what they found out is chilling, Adam Penenberg tried to reproduce Sophie Calle's shadowing experiment, but using only digital traces. In a different style, in March 2012, Stephen Wolfram amused himself by retracing 10 years of his life from the details of his phone calls, the emails he sent and received, and so on. In the personal analytics of my life he looks back at the rhythm of his days,

What can this information be used for? I remember last May, I was going through a medical exam for immigration, and the doctor asked me "and apart from that, do you sleep well?". It is not really a trick question, just a routine one. I suppose. And yet, it threw me off. Because it depends. I sleep less than my children. Less than I probably should. I have spells of insomnia, often when the kids have nightmares. But how can a doctor work with such lousy data? It is a bit like when my son explains that his stomach hurts. It does not hurt much, but a little. And only at times. To know whether I sleep well, he could wire me up with electrodes and follow me for two months. If I had a "smart" phone, I could also have a tracker recording information while I sleep.

That said, I can also look at my connection logs, through the emails I send, or through Twitter (I actually did this exercise a few months ago).
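
As a minimal sketch (the file logs.csv, its timestamp column and its date format are hypothetical, standing in for whatever export of emails or tweets one actually has), an hourly activity profile could be obtained along these lines:

logs=read.csv("logs.csv",stringsAsFactors=FALSE)           # one row per email or tweet
ts=as.POSIXct(logs$timestamp,format="%Y-%m-%d %H:%M:%S")   # assumed timestamp format
hour=as.integer(format(ts,"%H"))                           # hour of the day, 0 to 23
barplot(table(factor(hour,levels=0:23)),
        xlab="hour of the day",ylab="number of events")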

4. Terrorism, the NSA and individual data

If you believe some media outlets, a key moment in the history of Big Data was Edward Snowden's revelations: during the summer of 2013 (via the Guardian and the Washington Post), he made public a number of NSA documents considered secret, concerning for instance the collection of telephone call metadata in the United States, as well as internet surveillance systems (the PRISM surveillance program, but also the British government's Tempora surveillance program). That said, these revelations mostly confirmed, through official documents, information already known to many. In 2008, in the Numerati, Stephen Baker was already surprised that at the NSA, statistics had replaced cryptography. Since 2001, the task is no longer to decrypt coded messages, but to identify atypical behaviors that could be associated with terrorist activities.

What sorts of data would fuel the hunt for terrorists? Practically anything the government could get its hands on. In the years following 9/11, the government spent more than $1 billion to merge its enormous databases, including those of the FBI and the CIA. This would give data miners a single unified resource. But that wasn’t all. They would also trawl oceans of consumer and demographic details, airline records and hotel receipts, along with videos, photos, and millions of hours of international phone and Internet traffic harvested by the NSA. This trove matched anything that the Web giants Yahoo and Google were grappling with. In May 2006, news surfaced that the NSA was secretly extending its nets even further. USA Today reported that major phone companies had delivered hundreds of billions of phone records to the government. These provided details on who was calling whom, from where, for how long, and whether the call was forwarded. Were the NSA staff also listening in on the calls and reading the e-mails? There was no telling.

Here, the aim is to leave Adolphe Quetelet's average man behind and to detect deviations from a norm.

For the researchers to pick out these outliers, they must first figure out what’s ‘normal’. Picture our society on a big piece of poster board. At first glance it looks entirely blue, monochromatic. But step closer, and you’ll see tiny dots and strings of red. That background of blue represents boring, law-abiding (for the most part) us. Our only function on this display is to bring into relief the bits of red. Those are the suspected terrorists. (…) Are there times, I ask, when you just have too much data? When it gets in the way and confuses things? He seems taken aback by this line of questioning. ‘More data is always better,’ he says.

5. Big Data is my friend…

In the NSA story, Big Data is scary because we do not quite know who has access to all that information. And in this case, the targeted problem (detecting a terrorist) is a harder one than detecting consumption trends. If I buy a record on amazon, I am less inclined to try to erase my traces than terrorists probably are when they order large quantities of ammonium nitrate online (although I suppose it depends on which record I buy; we all have our little weaknesses and our little acts of cowardice…). The NSA (and all the men in black, generally speaking) are scary. But there are cases where the users of Big Data are (relatively) well identified, and where the goal is clear too.

For example, Google showed (in a study on the flu) that analyzing the keywords typed into its search engine made it possible to detect epidemics before physicians raised the alert.

Individually, I would not like all the searches I make online to be made public. But collectively, one can easily imagine, with this example, that aggregating so much information makes it possible to detect potential trends, despite all the noise. Another well-known example is the use of Twitter data to detect a cholera epidemic after the earthquake in Haiti, two weeks before its official recognition. Closer to home, in how New York's Fire Department uses data mining, Elizabeth Dwoskin explains how the New York Fire Department uses Big Data to detect incipient fires.

Crowdsourcing is one of the big benefits associated with Big Data. To take an example borrowed from Dick Kasperowski (and mentioned at the beginning of this post): if crowdsourcing had existed in the 16th century (the expression black swan, from the poet Decimus Iunius Iuvenalis, known as Juvenal, "rara avis in terris nigroque simillima cygno", was used in London to describe an impossible event), the debate about black swans would not have taken place, since one could hope that someone would have spoken up to say that black swans do exist (before Willem de Vlamingh discovered them, in 1697). More generally, crowdsourcing helps avoid cognitive biases.

We could go on forever, I think… and I want to keep a little for next week. And if that is not enough (I am thinking of the data-visualization aspects), I will come back to the Cœur des Sciences once more!