# GEO4310 macro
# From mn/geo/geoit
"lagdato"<-function(x,nn) { if (nn==12){ dato<-as.integer(x/100) til<-(x -dato*100)/nn-1/nn dato<-dato+til }else { if (nn==365){ dato<-as.integer(x/10000) mnd<-as.integer(x/100) dag<-x-mnd*100 mnd<-mnd-dato*100 ndag<-0 if (mnd==2) ndag<-31 if (mnd==3) ndag<-31+28 if (mnd==4) ndag<-31+28+31 if (mnd==5) ndag<-31+28+31+30 if (mnd==6) ndag<-31+28+31+30+31 if (mnd==7) ndag<-31+28+31+30+31+30 if (mnd==8) ndag<-31+28+31+30+31+30+31 if (mnd==9) ndag<-31+28+31+30+31+30+31+31 if (mnd==10) ndag<-31+28+31+30+31+30+31+31+30 if (mnd==11) ndag<-31+28+31+30+31+30+31+31+30+31 if (mnd==12) ndag<-31+28+31+30+31+30+31+31+30+31+30 til<-(dag+ndag-1)/nn dato<-dato+til }else { dato<-x }} dato } "see_data"<-function(x,nn,nr=1,nc=1,ptrend=F) { tkt<-colnames(x) stor<-dim(x) ant<-stor[2] top<-stor[1] if(ptrend){ trnd<-trend(x,nn) xh<-seq(1:2) yh<-seq(1:2) } top<-as.integer(top) maksd<-lagdato(x[top,1],nn) mind<-lagdato(x[1,1],nn) xverdi<-seq(from=mind,to=maksd,length=top) par(mfrow=c(nr,nc),ask=T) for(i in 2:ant) { ii<-i-1 ttt<-tkt[i] plot(xverdi,x[,i],type='l',xlab='YEAR',ylab=ttt) if(ptrend){ xh[1]<-trnd[ii,1] xh[2]<-trnd[ii,2] yh[1]<-trnd[ii,3] yh[2]<-trnd[ii,3]+trnd[ii,5]*(trnd[ii,2]-trnd[ii,1]) lines(xh,yh,lty=3) } } par(mfrow=c(1,1),ask=F) } daily_to_monthly<-function(daily,...){ nr<-nrow(daily) nc<-ncol(daily) months_help<-as.integer(daily[,1]/100) months<-unique(as.integer(daily[,1]/100)) nr2<-length(months) monthly<-matrix(nrow=nr2,ncol=nc) monthly[,1]<-months for (i in 1:nr2){ monthly[i,2:nc]<-sum(daily[months_help==months[i],2:nc],...) # endret til sum() fra mean() i september 2010. % og tilbake til mean igjen da jeg skulle ha seasonal plot over temp :) Fikk først -200 grader i januar med sum-funksjonen! 
} colnames(monthly)<-colnames(daily) monthly } monthly_to_annual<-function(monthly,...){ nr<-nrow(monthly) nc<-ncol(monthly) years_help<-as.integer(monthly[,1]/100) years<-unique(as.integer(monthly[,1]/100)) nr2<-length(years) annual<-matrix(nrow=nr2,ncol=nc) annual[,1]<-years for (i in 1:nr2){ annual[i,2:nc]<-sum(monthly[years_help==years[i],2:nc],...) } colnames(annual)<-colnames(monthly) annual } monthly_to_onemonth<-function(monthly,month){ nr<-nrow(monthly) nc<-ncol(monthly) months_help<-monthly[,1]-as.integer(monthly[,1]/100)*100 years<-unique(as.integer(monthly[,1]/100)) nr2<-length(years) onemonth<-monthly[months_help==month,] onemonth[,1]<-years colnames(onemonth)<-colnames(monthly) onemonth } monthly_to_monthly_average<-function(monthly){ nc<-ncol(monthly) nrow<-12 m.average<-matrix(ncol=nc,nrow=12) m.average[,1]<-seq(1:12) month<-monthly[,1]-as.integer(monthly[,1]/100)*100 for (i in 1:12){ for (j in 2:nc){ m.average[i,j]<-mean(na.omit(monthly[month==i,j])) } } colnames(m.average)<-colnames(monthly) m.average } "plot_seasonal"<-function(monthly,nr=1,nc=1) { tkt<-colnames(monthly) nrr<-nrow(monthly) ncc<-ncol(monthly) xv<-seq(1:nrr) par(mfrow=c(nr,nc),ask=T) for(i in 2:ncc){ ttt<-tkt[i] plot(xv,monthly[,i],type='l',xlab='MONTH',ylab=ttt) } par(mfrow=c(1,1),ask=F) } "histo"<-function(x,nr=1,nc=1) { tkt<-colnames(x) npr<-nrow(x) npc<-ncol(x) par(mfrow=c(nr,nc),ask=T) for(i in 2:npc) { ttt<-tkt[i] hist(x[,i],main=NULL) title(main=ttt, adj=0) title(ylab='Frequency') } par(mfrow=c(1,1),ask=F) } moment <- function(data) { stor<-dim(data) ant<-stor[2] tkt<-names(data) moments<-matrix(nrow=ant, ncol=5, dimnames=list(tkt,c("mean", "stdv", "cv", "skew", "kurt"))) for(i in 1:ant) { x<-na.omit(data[,i]) moments[i,1]<-mean(x) moments[i,2]<-sqrt(var(x)) moments[i,3]<-sqrt(var(x))/mean(x) moments[i,4]<-skewness(x) moments[i,5]<-kurtosis(x) } return(moments) } "skewness" <- function(x) { ll<-length(x) hh<-sum((x-mean(x))^3)/(sqrt(var(x))^3) hh*ll/((ll-1)*(ll-2)) } 
"kurtosis" <- function(x) { mm<-mean(x) ss<-as.vector(sqrt(var(x))) ll<-length(x) hh<-(x-mm)/ss hh<-hh^4 (ll*(ll+1))/((ll-1)*(ll-2)*(ll-3))*sum(hh) } "ktrend" <- function(x,y) { pp<-lm(y~x) pt<-summary(pp) pt<-coef(pt) co<-pt[1:2] se<-pt[3:4] return(co,se) } "trend" <- function(x,nn) { tkt<-names(x) nc<-ncol(x) nr<-nrow(x) maksd<-lagdato(x[nr,1],nn) mind<-lagdato(x[1,1],nn) xverdi<-seq(from=mind,to=maksd,length=nr) # xverdi<-xverdi-min(xverdi) coe<-matrix(nrow=nc-1, ncol=6, dimnames=list(tkt[2:nc],c("Start_date","End_date","Intercept", "S.E.", "Slope", "S.E."))) for (i in 2:nc) { ii<-i-1 yy<-x[,1:2] yy[,1]<-xverdi yy[,2]<-x[,i] yy<-na.omit(yy) coe[ii,1]<-yy[1,1] coe[ii,2]<-yy[nrow(yy),1] yy[,1]<-yy[,1]-yy[1,1] xy<-yy[,2] xx<-yy[,1] hh<-ktrend(xx,xy) coe[ii,3]<-hh$co[1] coe[ii,4]<-hh$se[1] coe[ii,5]<-hh$co[2] coe[ii,6]<-hh$se[2] } return(coe) } "run.vector"<-function(x) { y<-NULL z<-mean(x) for(i in 1:length(x)) { if(z>=x[i]) y[i]<-1 else y[i]<-0 } y } "ant.runs"<-function(x) { y<-1 for(i in 1:(length(x)-1)) { if(x[i]==x[i+1])y<-y else y<-y+1 } y } "run.test"<-function(x) { x<-na.omit(x) N<-length(x) mu<-N/2+1 sigma<-N*(N-2)/(4*(N-1)) y<-run.vector(x) z<-ant.runs(y) a<-qnorm(0.025,mu,sqrt(sigma)) b<-qnorm(0.975,mu,sqrt(sigma)) return(a,z,b) } "runtest"<-function(x) { stor<-dim(x) ant<-stor[2] tkt<-names(x) runt<-matrix(nrow=ant, ncol=3, dimnames=list(tkt,c("a", "z", "b"))) for(i in 1:ant) { yy<-x[,i] hh<-(run.test(yy)) runt[i,1]<-hh$a runt[i,2]<-hh$z runt[i,3]<-hh$b } return(runt) } "newcor"<-function(xx) { tkt<-names(xx) stor<-dim(xx) ant<-as.integer(stor[2]) top<-as.integer(stor[1]) kor<-matrix(nrow=ant, ncol=ant, dimnames=list(tkt,tkt)) ss<-matrix(nrow=top,ncol=2) ha<-ant-1 for(i in 1:ha) { kor[i,i]<-1.0 kk<-i+1 for(j in kk:ant) { ss<-xx[1:top,c(i,j)] tt<-na.omit(ss) ts<-nrow(tt) if (ts==0) kor[i,j]<-NA else kor[i,j]<-cor(tt)[1,2] kor[j,i]<-kor[i,j] } } kor[ant,ant]<-1.0 return(kor) } "autocor"<-function(x,nrr=1,ncc=1,...) 
{ tkt<-colnames(x) nc<-ncol(x) nr<-nrow(x) par(mfrow=c(nrr,ncc)) for(i in 2:nc) { ttt<-tkt[i] ha<-x[,i] acfun<-acf(ha,plot=F,...) plot(acfun$lag,acfun$acf,type='l',main=ttt,xlab='Lag',ylab='ACF') par(ask=T) } } "spectral"<-function(x,nrr=1,ncc=1,alog='x',...){ nc<-ncol(x) nr<-nrow(x) par(mfrow=c(nrr,ncc),ask=T) for (i in 2:nc){ spec<-spectrum(na.omit(x[,i]),plot=F,...) plot(1/spec$freq,spec$spec,type='l',log=alog,lty=2,xlab='Wavelength',ylab='Spectrum') } par(mfrow=c(1,1),ask=F) } "norm.diag"<- function(z) { # # produces diagnostic plots for output of # norm.fit stored in z # n <- length(z$data) x <- (1:n)/(n + 1) { if(z$trans) { par(mfrow = c(2, 2)) norm.pp(c(0,1), z$data) norm.qq(c(0,1), z$data) norm.his(c(0,1), z$data) } else { par(mfrow = c(2, 2)) norm.pp(z$mle, z$data) norm.qq(z$mle, z$data) norm.his(z$mle, z$data) } par(mfrow = c(1, 1)) invisible() } } "norm.fit"<- function (xdat, ydat = NULL, mul = NULL, sigl = NULL, mulink = identity, siglink = identity, show = TRUE, method = "Nelder-Mead", maxit = 10000, ...) 
{ xdat<-as.numeric(na.omit(xdat)) z <- list() npmu <- length(mul) + 1 npsc <- length(sigl) + 1 z$trans <- FALSE in2 <- sqrt(var(xdat)) in1 <- mean(xdat) if (is.null(mul)) { mumat <- as.matrix(rep(1, length(xdat))) muinit <- in1 } else { z$trans <- TRUE mumat <- cbind(rep(1, length(xdat)), ydat[, mul]) muinit <- c(in1, rep(0, length(mul))) } if (is.null(sigl)) { sigmat <- as.matrix(rep(1, length(xdat))) siginit <- in2 } else { z$trans <- TRUE sigmat <- cbind(rep(1, length(xdat)), ydat[, sigl]) siginit <- c(in2, rep(0, length(sigl))) } z$model <- list(mul, sigl) z$link <- deparse(substitute(c(mulink, siglink))) init <- c(muinit, siginit) norm.lik<-function(a){ mu <- mulink(mumat %*% (a[1:npmu])) sc <- siglink(sigmat %*% (a[seq(npmu + 1, length = npsc)])) y <- (xdat - mu)/sc -1*sum(log(1/sqrt(2*pi*sc^2))) + sum(0.5*y^2) } x <- optim(init, norm.lik, hessian = TRUE, method = method, control = list(maxit = maxit, ...)) z$conv <- x$convergence mu <- mulink(mumat %*% (x$par[1:npmu])) sc <- siglink(sigmat %*% (x$par[seq(npmu + 1, length = npsc)])) z$nllh <- x$value z$data <- xdat if (z$trans) { z$data <- (as.vector((xdat - mu)/sc)) } z$mle <- x$par z$cov <- solve(x$hessian) z$se <- sqrt(diag(z$cov)) z$vals <- cbind(mu, sc) if (show) { if (z$trans) print(z[c(2, 3, 4)]) else print(z[4]) if (!z$conv) print(z[c(5, 7, 9)]) } invisible(z) } "norm.his"<- function(a, dat) { # # Plots histogram of data and fitted density # for output of norm.fit stored in z # h <- hist(dat, prob = T, plot = F) x <- seq(min(dat), max(dat), length = 100) y <- dnorm(x,a[1],a[2]) hist(dat, prob = T, ylim = c(0, max(y)), xlab = "x", ylab = "f(x)", main = "Density Plot") lines(x, y, col = 4) } "norm.pp"<- function(a, dat) { # # sub-function for norm.diag # produces probability plot # plot((1:length(dat))/length(dat), pnorm(sort(dat),a[1],a[2]), xlab = "empirical",ylab ="model",main = "Probability plot") abline(0, 1, col = 4) } "norm.qq"<- function(a, dat) { # # function called by norm.diag # produces 
quantile plot # plot(sort(dat), qnorm((1:length(dat)/(length(dat) + 1)),a[1],a[2]), xlab = "empirical", ylab = "model", main = "Quantile Plot") abline(0, 1, col = 4) } "lnorm.diag"<- function(z) { # # produces diagnostic plots for output of # lnorm.fit stored in z # n <- length(z$data) x <- (1:n)/(n + 1) { if(z$trans) { par(mfrow = c(2, 2)) norm.pp(c(0,1), z$data) norm.qq(c(0,1), z$data) norm.his(c(0,1), z$data) } else { par(mfrow = c(2, 2)) lnorm.pp(z$mle, z$data) lnorm.qq(z$mle, z$data) lnorm.his(z$mle, z$data) } par(mfrow = c(1, 1)) invisible() } } "lnorm.fit"<- function (xdat, ydat = NULL, mul = NULL, sigl = NULL, mulink = identity, siglink = identity, show = TRUE, method = "Nelder-Mead", maxit = 10000, ...) { xdat<-as.numeric(na.omit(xdat)) xdat<-xdat[xdat>0.0] z <- list() npmu <- length(mul) + 1 npsc <- length(sigl) + 1 z$trans <- FALSE in2 <- sqrt(var(log(xdat))) in1 <- mean(log(xdat)) if (is.null(mul)) { mumat <- as.matrix(rep(1, length(xdat))) muinit <- in1 } else { z$trans <- TRUE mumat <- cbind(rep(1, length(xdat)), ydat[, mul]) muinit <- c(in1, rep(0, length(mul))) } if (is.null(sigl)) { sigmat <- as.matrix(rep(1, length(xdat))) siginit <- in2 } else { z$trans <- TRUE sigmat <- cbind(rep(1, length(xdat)), ydat[, sigl]) siginit <- c(in2, rep(0, length(sigl))) } z$model <- list(mul, sigl) z$link <- deparse(substitute(c(mulink, siglink))) init <- c(muinit, siginit) lnorm.lik<-function(a){ mu <- mulink(mumat %*% (a[1:npmu])) sc <- siglink(sigmat %*% (a[seq(npmu + 1, length = npsc)])) y <- (log(xdat) - mu)/sc -1*sum(log(1/sqrt(2*pi*sc^2*xdat^2))) + sum(0.5*y^2) } x <- optim(init, lnorm.lik, hessian = TRUE, method = method, control = list(maxit = maxit, ...)) z$conv <- x$convergence mu <- mulink(mumat %*% (x$par[1:npmu])) sc <- siglink(sigmat %*% (x$par[seq(npmu + 1, length = npsc)])) z$nllh <- x$value z$data <- xdat if (z$trans) { z$data <- (as.vector((log(xdat) - mu)/sc)) } z$mle <- x$par z$cov <- solve(x$hessian) z$se <- sqrt(diag(z$cov)) z$vals <- 
cbind(mu, sc) if (show) { if (z$trans) print(z[c(2, 3, 4)]) else print(z[4]) if (!z$conv) print(z[c(5, 7, 9)]) } invisible(z) } "lnorm.his"<- function(a, dat) { # # Plots histogram of data and fitted density # for output of gev.fit stored in z # h <- hist(dat, prob = T, plot = F) x <- seq(min(dat), max(dat), length = 100) y <- dlnorm(x,a[1],a[2]) hist(dat, prob = T, ylim = c(0, max(y)), xlab = "x", ylab = "f(x)", main = "Density Plot") lines(x, y, col = 4) } "lnorm.pp"<- function(a, dat) { # # sub-function for lnorm.diag # produces probability plot # plot((1:length(dat))/length(dat), plnorm(sort(dat),a[1],a[2]), xlab = "Empirical quantiles",ylab ="Theoretical lognormal quantiles",main = "Probability plot") abline(0, 1, col = 4) } "lnorm.qq"<- function(a, dat) { # # function called by lnorm.diag # produces quantile plot # plot(sort(dat), qlnorm((1:length(dat)/(length(dat) + 1)),a[1],a[2]), xlab = "Empirical quantiles", ylab = "Theoretical lognormal quantiles", main = "Quantile Plot") abline(0, 1, col = 4) } gev.fit<-function (xdat, ydat = NULL, mul = NULL, sigl = NULL, shl = NULL, mulink = identity, siglink = identity, shlink = identity, show = TRUE, method = "Nelder-Mead", maxit = 10000, ...) 
{ xdat<-as.numeric(na.omit(xdat)) z <- list() npmu <- length(mul) + 1 npsc <- length(sigl) + 1 npsh <- length(shl) + 1 z$trans <- FALSE in2 <- sqrt(6 * var(xdat))/pi in1 <- mean(xdat) - 0.57722 * in2 if (is.null(mul)) { mumat <- as.matrix(rep(1, length(xdat))) muinit <- in1 } else { z$trans <- TRUE mumat <- cbind(rep(1, length(xdat)), ydat[, mul]) muinit <- c(in1, rep(0, length(mul))) } if (is.null(sigl)) { sigmat <- as.matrix(rep(1, length(xdat))) siginit <- in2 } else { z$trans <- TRUE sigmat <- cbind(rep(1, length(xdat)), ydat[, sigl]) siginit <- c(in2, rep(0, length(sigl))) } if (is.null(shl)) { shmat <- as.matrix(rep(1, length(xdat))) shinit <- 0.1 } else { z$trans <- TRUE shmat <- cbind(rep(1, length(xdat)), ydat[, shl]) shinit <- c(0.1, rep(0, length(shl))) } z$model <- list(mul, sigl, shl) z$link <- deparse(substitute(c(mulink, siglink, shlink))) init <- c(muinit, siginit, shinit) gev.lik <- function(a) { mu <- mulink(mumat %*% (a[1:npmu])) sc <- siglink(sigmat %*% (a[seq(npmu + 1, length = npsc)])) xi <- shlink(shmat %*% (a[seq(npmu + npsc + 1, length = npsh)])) y <- (xdat - mu)/sc y <- 1 + xi * y if (any(y <= 0) || any(sc <= 0)) return(10^6) sum(log(sc)) + sum(y^(-1/xi)) + sum(log(y) * (1/xi + 1)) } x <- optim(init, gev.lik, hessian = TRUE, method = method, control = list(maxit = maxit, ...)) z$conv <- x$convergence mu <- mulink(mumat %*% (x$par[1:npmu])) sc <- siglink(sigmat %*% (x$par[seq(npmu + 1, length = npsc)])) xi <- shlink(shmat %*% (x$par[seq(npmu + npsc + 1, length = npsh)])) z$nllh <- x$value z$data <- xdat if (z$trans) { z$data <- -log(as.vector((1 + (xi * (xdat - mu))/sc)^(-1/xi))) } z$mle <- x$par z$cov <- solve(x$hessian) z$se <- sqrt(diag(z$cov)) z$vals <- cbind(mu, sc, xi) if (show) { if (z$trans) print(z[c(2, 3, 4)]) else print(z[4]) if (!z$conv) print(z[c(5, 7, 9)]) } invisible(z) } gev.diag<-function (z) { n <- length(z$data) x <- (1:n)/(n + 1) if (z$trans) { oldpar <- par(mfrow = c(1, 2)) plot(x, exp(-exp(-sort(z$data))), xlab = 
"Empirical", ylab = "Model") abline(0, 1, col = 4) title("Residual Probability Plot") plot(-log(-log(x)), sort(z$data), ylab = "Empirical", xlab = "Model") abline(0, 1, col = 4) title("Residual Quantile Plot (Gumbel Scale)") } else { oldpar <- par(mfrow = c(2, 2)) gev.pp(z$mle, z$data) gev.qq(z$mle, z$data) gev.rl(z$mle, z$cov, z$data) gev.his(z$mle, z$data) } par(oldpar) invisible() } gev.pp<-function (a, dat) { plot((1:length(dat))/length(dat), gevf(a, sort(dat)), xlab = "Empirical", ylab = "Model", main = "Probability Plot") abline(0, 1, col = 4) } gev.qq<-function (a, dat) { plot(sort(dat),gevq(a, 1 - (1:length(dat)/(length(dat) + 1))), ylab = "Empirical", xlab = "Model", main = "Quantile Plot") abline(0, 1, col = 4) } gev.rl<-function (a, mat, dat) { eps <- 1e-06 a1 <- a a2 <- a a3 <- a a1[1] <- a[1] + eps a2[2] <- a[2] + eps a3[3] <- a[3] + eps f <- c(seq(0.01, 0.09, by = 0.01), 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 0.995, 0.999) q <- gevq(a, 1 - f) d1 <- (gevq(a1, 1 - f) - q)/eps d2 <- (gevq(a2, 1 - f) - q)/eps d3 <- (gevq(a3, 1 - f) - q)/eps d <- cbind(d1, d2, d3) v <- apply(d, 1, q.form, m = mat) plot(-1/log(f), q, log = "x", type = "n", xlim = c(0.1, 1000), ylim = c(min(dat, q), max(dat, q)), xlab = "Return Period", ylab = "Return Level") title("Return Level Plot") lines(-1/log(f), q) lines(-1/log(f), q + 1.96 * sqrt(v), col = 4) lines(-1/log(f), q - 1.96 * sqrt(v), col = 4) points(-1/log((1:length(dat))/(length(dat) + 1)), sort(dat)) } gevf<-function (a, z) { if (a[3] != 0) exp(-(1 + (a[3] * (z - a[1]))/a[2])^(-1/a[3])) else gum.df(z, a[1], a[2]) } pgev<-function (q, loc = 0, scale = 1, shape = 0, lower.tail = TRUE) { if (min(scale) <= 0) stop("invalid scale") if (length(shape) != 1) stop("invalid shape") q <- (q - loc)/scale if (shape == 0) p <- exp(-exp(-q)) else p <- exp(-pmax(1 + shape * q, 0)^(-1/shape)) if (!lower.tail) p <- 1 - p p } gevq<-function (a, p) { if (a[3] != 0) a[1] + (a[2] * ((-log(1 - p))^(-a[3]) - 1))/a[3] else 
gum.q(p, a[1], a[2]) } gev.his<-function (a, dat) { h <- hist(dat, prob = TRUE, plot = FALSE) if (a[3] < 0) { x <- seq(min(h$breaks), min(max(h$breaks), (a[1] - a[2]/a[3] - 0.001)), length = 100) } else { x <- seq(max(min(h$breaks), (a[1] - a[2]/a[3] + 0.001)), max(h$breaks), length = 100) } y <- gev.dens(a, x) hist(dat, prob = TRUE, ylim = c(0, max(y)), xlab = "z", ylab = "f(z)", main = "Density Plot") points(dat, rep(0, length(dat))) lines(x, y) } "pearson3.fit"<- function (xdat, ydat = NULL, mul = NULL, sigl = NULL,shl = NULL, mulink = identity, siglink = identity, shlink = identity, show = TRUE, method = "Nelder-Mead", maxit = 10000, ...) { xdat<-as.numeric(na.omit(xdat)) z <- list() npmu <- length(mul) + 1 npsc <- length(sigl) + 1 npsh <- length(shl) + 1 z$trans <- FALSE nn<-length(xdat) in3<-(2/(skewness(xdat)*(sqrt(nn*(nn-1)))/(nn-2)))^2 in2 <- sign(skewness(xdat))*sqrt(var(xdat)/in3) in1 <- mean(xdat)-in3*in2 # in1<-max(in1,min(xdat)*1.05) if (is.null(mul)) { mumat <- as.matrix(rep(1, length(xdat))) muinit <- in1 } else { z$trans <- TRUE mumat <- cbind(rep(1, length(xdat)), ydat[, mul]) muinit <- c(in1, rep(0, length(mul))) } if (is.null(sigl)) { sigmat <- as.matrix(rep(1, length(xdat))) siginit <- in2 } else { z$trans <- TRUE sigmat <- cbind(rep(1, length(xdat)), ydat[, sigl]) siginit <- c(in2, rep(0, length(sigl))) } if (is.null(shl)) { shmat <- as.matrix(rep(1, length(xdat))) shinit <- in3 } else { z$trans <- TRUE shmat <- cbind(rep(1, length(xdat)), ydat[, shl]) shinit <- c(in3, rep(0, length(shl))) } z$model <- list(mul, sigl, shl) z$link <- deparse(substitute(c(mulink, siglink, shlink))) init <- c(muinit, siginit, shinit) pearson3.lik<-function(a){ mu <- mulink(mumat %*% (a[1:npmu])) sc <- siglink(sigmat %*% (a[seq(npmu + 1, length = npsc)])) xi <- shlink(shmat %*% (a[seq(npmu + npsc + 1, length = npsh)])) y <- xdat-mu # if (any(y <= 0) && any(xi >= 0)) # return(10^6) # if (any(y >= 0) && any(xi <= 0)) # return(10^6) -1*sum(log(1/(sc^xi*gamma(xi)))) - 
sum(log(y^(xi-1)))+ sum(y/sc) } x <- optim(init, pearson3.lik, hessian = TRUE, method = method, control = list(maxit = maxit, ...)) z$conv <- x$convergence mu <- mulink(mumat %*% (x$par[1:npmu])) sc <- siglink(sigmat %*% (x$par[seq(npmu + 1, length = npsc)])) xi <- shlink(shmat %*% (x$par[seq(npmu + npsc + 1, length = npsh)])) z$nllh <- x$value z$data <- xdat if (z$trans) { z$data <- (as.vector((xdat-mu)/sc)^xi) } z$mle <- x$par z$cov <- solve(x$hessian) z$se <- sqrt(diag(z$cov)) z$vals <- cbind(mu, sc, xi) if (show) { if (z$trans) print(z[c(2, 3, 4)]) else print(z[4]) if (!z$conv) print(z[c(5, 7, 9)]) } invisible(z) } "pearson3.diag"<- function(z) { # # produces diagnostic plots for output of # gamma.fit stored in z # n <- length(z$data) x <- (1:n)/(n + 1) { if(z$trans) { par(mfrow = c(1, 2)) plot(x, exp( - exp( - sort(z$data))), xlab = "empirical", ylab = "model") abline(0, 1, col = 4) title("Residual Probability Plot") plot( - log( - log(x)), sort(z$data), xlab = "empirical", ylab = "model") abline(0, 1, col = 4) title("Residual Quantile Plot (Gumbel Scale)") } else { par(mfrow = c(2, 2)) pearson3.pp(z$mle, z$data) pearson3.qq(z$mle, z$data) pearson3.his(z$mle, z$data) } par(mfrow = c(1, 1)) invisible() } } "pearson3.his"<- function(a, dat) { # # Plots histogram of data and fitted density # for output of gev.fit stored in z # h <- hist(dat, prob = T, plot = F) x <- seq(min(dat), max(dat), length = 100) y <- dgamma(x-a[1],shape=a[3],scale=a[2]) hist(dat, prob = T, ylim = c(0, max(y)), xlab = "x", ylab = "f(x)", main = "Density Plot") lines(x, y, col = 4) } "pearson3.pp"<- function(a, dat) { # # sub-function for norm.diag # produces probability plot # plot((1:length(dat))/length(dat), pgamma(sort(dat)-a[1],shape=a[3],scale=a[2]), xlab = "empirical",ylab ="model",main = "Probability plot") abline(0, 1, col = 4) } "pearson3.qq"<- function(a, dat) { # # function called by pearson3.diag # produces quantile plot # plot(sort(dat), a[1]+qgamma((1:length(dat)/(length(dat) 
+ 1)),shape=a[3],scale=a[2]), xlab = "empirical", ylab = "model", main = "Quantile Plot") abline(0, 1, col = 4) } "lpearson3.fit"<- function (xdat, ydat = NULL, mul = NULL, sigl = NULL,shl = NULL, mulink = identity, siglink = identity, shlink = identity, show = TRUE, method = "Nelder-Mead", maxit = 10000, ...) { xdat<-as.numeric(na.omit(xdat)) xdat<-xdat[xdat>0] xdat<-log(xdat) z <- list() npmu <- length(mul) + 1 npsc <- length(sigl) + 1 npsh <- length(shl) + 1 z$trans <- FALSE nn<-length(xdat) in3<-(2/(skewness(xdat)*(sqrt(nn*(nn-1)))/(nn-2)))^2 in2 <- sign(skewness(xdat))*sqrt(var(xdat)/in3) in1 <- mean(xdat)-in3*in2 if (is.null(mul)) { mumat <- as.matrix(rep(1, length(xdat))) muinit <- in1 } else { z$trans <- TRUE mumat <- cbind(rep(1, length(xdat)), ydat[, mul]) muinit <- c(in1, rep(0, length(mul))) } if (is.null(sigl)) { sigmat <- as.matrix(rep(1, length(xdat))) siginit <- in2 } else { z$trans <- TRUE sigmat <- cbind(rep(1, length(xdat)), ydat[, sigl]) siginit <- c(in2, rep(0, length(sigl))) } if (is.null(shl)) { shmat <- as.matrix(rep(1, length(xdat))) shinit <- in3 } else { z$trans <- TRUE shmat <- cbind(rep(1, length(xdat)), ydat[, shl]) shinit <- c(in3, rep(0, length(shl))) } z$model <- list(mul, sigl, shl) z$link <- deparse(substitute(c(mulink, siglink, shlink))) init <- c(muinit, siginit, shinit) lpearson3.lik<-function(a){ mu <- mulink(mumat %*% (a[1:npmu])) sc <- siglink(sigmat %*% (a[seq(npmu + 1, length = npsc)])) xi <- shlink(shmat %*% (a[seq(npmu + npsc + 1, length = npsh)])) y <- xdat-mu -1*sum(log(1/(sc^xi*gamma(xi)))) - sum(log(y^(xi-1)))+ sum(y/sc) } x <- optim(init, lpearson3.lik, hessian = TRUE, method = method, control = list(maxit = maxit, ...)) z$conv <- x$convergence mu <- mulink(mumat %*% (x$par[1:npmu])) sc <- siglink(sigmat %*% (x$par[seq(npmu + 1, length = npsc)])) xi <- shlink(shmat %*% (x$par[seq(npmu + npsc + 1, length = npsh)])) z$nllh <- x$value z$data <- exp(xdat) if (z$trans) { z$data <- (as.vector((xdat-mu)/sc)^xi) } z$mle <- 
x$par z$cov <- solve(x$hessian) z$se <- sqrt(diag(z$cov)) z$vals <- cbind(mu, sc, xi) if (show) { if (z$trans) print(z[c(2, 3, 4)]) else print(z[4]) if (!z$conv) print(z[c(5, 7, 9)]) } invisible(z) } "lpearson3.diag"<- function(z) { # # produces diagnostic plots for output of # gamma.fit stored in z # n <- length(z$data) x <- (1:n)/(n + 1) { if(z$trans) { par(mfrow = c(1, 2)) plot(x, exp( - exp( - sort(z$data))), xlab = "empirical", ylab = "model") abline(0, 1, col = 4) title("Residual Probability Plot") plot( - log( - log(x)), sort(z$data), xlab = "empirical", ylab = "model") abline(0, 1, col = 4) title("Residual Quantile Plot (Gumbel Scale)") } else { par(mfrow = c(2, 2)) lpearson3.pp(z$mle, z$data) lpearson3.qq(z$mle, z$data) lpearson3.his(z$mle, z$data) } par(mfrow = c(1, 1)) invisible() } } "lpearson3.his"<- function(a, dat) { # # Plots histogram of data and fitted density # for output of gev.fit stored in z # h <- hist(dat, prob = T, plot = F) x <- seq(min(dat), max(dat), length = 100) y <- dgamma(log(x)-a[1],shape=a[3],scale=a[2]) hist(dat, prob = T, ylim = c(0, max(y)), xlab = "x", ylab = "f(x)", main = "Density Plot") lines(x, y, col = 4) } "lpearson3.pp"<- function(a, dat) { # # sub-function for norm.diag # produces probability plot # plot((1:length(dat))/length(dat), pgamma(sort(log(dat))-a[1],shape=a[3],scale=a[2]), xlab = "empirical",ylab ="model",main = "Probability plot") abline(0, 1, col = 4) } "lpearson3.qq"<- function(a, dat) { # # function called by pearson3.diag # produces quantile plot # plot(sort(dat), exp(a[1]+qgamma((1:length(dat)/(length(dat) + 1)),shape=a[3],scale=a[2])), xlab = "empirical", ylab = "model", main = "Quantile Plot") abline(0, 1, col = 4) } "gamma.diag"<- function(z) { # # produces diagnostic plots for output of # gamma.fit stored in z # n <- length(z$data) x <- (1:n)/(n + 1) { if(z$trans) { par(mfrow = c(1, 2)) plot(x, exp( - exp( - sort(z$data))), xlab = "empirical", ylab = "model") abline(0, 1, col = 4) title("Residual 
Probability Plot") plot( - log( - log(x)), sort(z$data), xlab = "empirical", ylab = "model") abline(0, 1, col = 4) title("Residual Quantile Plot (Gumbel Scale)") } else { par(mfrow = c(2, 2)) gamma.pp(z$mle, z$data) gamma.qq(z$mle, z$data) gamma.his(z$mle, z$data) } par(mfrow = c(1, 1)) invisible() } } "gamma.fit"<- function (xdat, ydat = NULL, mul = NULL, sigl = NULL, mulink = identity, siglink = identity, show = TRUE, method = "Nelder-Mead", maxit = 10000, ...) { xdat<-as.numeric(na.omit(xdat)) xdat<-xdat[xdat>0.0] z <- list() npmu <- length(mul) + 1 npsc <- length(sigl) + 1 z$trans <- FALSE in2 <- (var(xdat))/mean(xdat) in1 <- (mean(xdat)^2)/var(xdat) if (is.null(mul)) { mumat <- as.matrix(rep(1, length(xdat))) muinit <- in1 } else { z$trans <- TRUE mumat <- cbind(rep(1, length(xdat)), ydat[, mul]) muinit <- c(in1, rep(0, length(mul))) } if (is.null(sigl)) { sigmat <- as.matrix(rep(1, length(xdat))) siginit <- in2 } else { z$trans <- TRUE sigmat <- cbind(rep(1, length(xdat)), ydat[, sigl]) siginit <- c(in2, rep(0, length(sigl))) } z$model <- list(mul, sigl) z$link <- deparse(substitute(c(mulink, siglink))) init <- c(muinit, siginit) gamma.lik<-function(a){ mu <- mulink(mumat %*% (a[1:npmu])) sc <- siglink(sigmat %*% (a[seq(npmu + 1, length = npsc)])) y <- xdat -1*sum(log(1/(sc^mu*gamma(mu)))) - sum(log(y^(mu-1)))+ sum(y/sc) } x <- optim(init, gamma.lik, hessian = TRUE, method = method, control = list(maxit = maxit, ...)) z$conv <- x$convergence mu <- mulink(mumat %*% (x$par[1:npmu])) sc <- siglink(sigmat %*% (x$par[seq(npmu + 1, length = npsc)])) z$nllh <- x$value z$data <- xdat if (z$trans) { z$data <- (as.vector((xdat^mu)/sc)) } z$mle <- x$par z$cov <- solve(x$hessian) z$se <- sqrt(diag(z$cov)) z$vals <- cbind(mu, sc) if (show) { if (z$trans) print(z[c(2, 3, 4)]) else print(z[4]) if (!z$conv) print(z[c(5, 7, 9)]) } invisible(z) } "gamma.his"<- function(a, dat) { # # Plots histogram of data and fitted density # for output of gev.fit stored in z # h <- hist(dat, 
prob = T, plot = F) x <- seq(min(dat), max(dat), length = 100) y <- dgamma(x,a[1],scale=a[2]) hist(dat, prob = T, ylim = c(0, max(y)), xlab = "x", ylab = "f(x)", main = "Density Plot") lines(x, y, col = 4) } "gamma.pp"<- function(a, dat) { # # sub-function for norm.diag # produces probability plot # plot((1:length(dat))/length(dat), pgamma(sort(dat),a[1],scale=a[2]), xlab = "Empirical",ylab ="Theoretical gamma quantiles",main = "Probability plot") abline(0, 1, col = 4) } "gamma.qq"<- function(a, dat) { # # function called by norm.diag # produces quantile plot # plot(sort(dat), qgamma((1:length(dat)/(length(dat) + 1)),a[1],scale=a[2]), xlab = "Empirical quantiles", ylab = "Theoretical gamma quantiles", main = "Quantile Plot") abline(0, 1, col = 4) } "weibull.diag"<- function(z) { # # produces diagnostic plots for output of # gev.fit stored in z # n <- length(z$data) x <- (1:n)/(n + 1) { if(z$trans) { par(mfrow = c(1, 2)) plot(x, exp( - exp( - sort(z$data))), xlab = "empirical", ylab = "model") abline(0, 1, col = 4) title("Residual Probability Plot") plot( - log( - log(x)), sort(z$data), xlab = "empirical", ylab = "model") abline(0, 1, col = 4) title("Residual Quantile Plot (Gumbel Scale)") } else { par(mfrow = c(2, 2)) weibull.pp(z$mle, z$data) weibull.qq(z$mle, z$data) weibull.his(z$mle, z$data) } par(mfrow = c(1, 1)) invisible() } } "weibull.fit"<- function(xdat, ydat = NULL, mul = NULL, sigl = NULL, mulink = identity, siglink = identity) { xdat<-as.numeric(na.omit(xdat)) # # obtains mles etc for norm. 
distn # z <- list() z$trans <- F # if maximization fails, could try # changing in1 and in2 which are # initial values for minimization routine in2 <- 8 in1 <- 3 if(is.null(mul)) { mumat <- as.matrix(rep(1, length(xdat))) muinit <- in1 } else { z$trans <- T mumat <- cbind(rep(1, length(xdat)), ydat[, mul]) muinit <- c(in1, rep(0, length(mul))) } if(is.null(sigl)) { sigmat <- as.matrix(rep(1, length(xdat))) siginit <- in2 } else { z$trans <- T sigmat <- cbind(rep(1, length(xdat)), ydat[, sigl]) siginit <- c(in2, rep(0, length(sigl))) } z$model <- list(mul, sigl) z$link <- deparse(substitute(c(mulink, siglink))) init <- c(muinit, siginit) assign("xdat", xdat, frame = 1) assign("mumat", mumat, frame = 1) assign("mul", mul, frame = 1) assign("mulink", mulink, frame = 1) assign("sigmat", sigmat, frame = 1) assign("sigl", sigl, frame = 1) assign("siglink", siglink, frame = 1) x <- nlmin(weibull.lik, init, max.fcal = 200, max.iter = 150, print = 0) z$conv <- x$converged if(z$conv) { z$nllh <- weibull.lik(x$x) h <- hess(weibull.lik, x$x) z$data <- xdat z$trans if(z$trans) { z$data <- (as.vector((xdat - mu)/sc)) } z$mle <- x$x z$se <- sqrt(diag(h)) z$cov <- h z$vals <- cbind(mu, sc,) } if(z$trans) print(z[c(2, 3, 4)]) else print(z[4]) if(z$conv) { print(z[c(5, 7, 8)]) } invisible(z) } "weibull.his"<- function(a, dat) { # # Plots histogram of data and fitted density # for output of gev.fit stored in z # h <- hist(dat, prob = T, plot = F) x <- seq(min(dat), max(dat), length = 100) y <- dweibull(x,a[1],a[2]) hist(dat, prob = T, ylim = c(0, max(y)), xlab = "x", ylab = "f(x)", main = "Density Plot") lines(x, y, col = 4) } "weibull.lik"<- function(a) { # # support routine for norm.fit # computes neg log lik of norm model # npmu <- length(mul) + 1 mu <- mulink(mumat %*% (a[1:npmu])) npsc <- length(sigl) + 1 sc <- siglink(sigmat %*% (a[seq(npmu + 1, length = npsc)])) assign("mu", mu, frame = 1) assign("sc", sc, frame = 1) y <- xdat/sc if(min(sc) <= 0 | min(mu) <= 0) l <- 10^10 else 
# NOTE(review): the next three lines complete a Weibull negative-log-likelihood
# whose `function(a) {` header lies above this section of the file; only the
# tail is visible here, so it is kept unchanged (reformatted only).
l <- sum((y/sc)^mu) - sum(log((mu/sc) * (y^(mu - 1))))
l
}

"identity" <- function(x) {
  # Identity link function.  Masks base::identity on purpose: it is the
  # default `mulink`/`siglink` argument of the *.fit routines below.
  x
}

"hess" <- function(f, x, ep = 0.0001) {
  # Approximate inverse of the observed information matrix of `f` at `x`,
  # i.e. solve() of a central finite-difference Hessian with relative
  # step `ep * x` in each coordinate.
  # NOTE(review): assumes no component of `x` is exactly 0 (step would be 0).
  eps <- ep * x
  n <- length(x)
  m <- matrix(0, ncol = n, nrow = n)
  for (i in 1:n) {
    for (j in 1:n) {
      x1 <- x
      x1[i] <- x1[i] + eps[i]
      x1[j] <- x1[j] + eps[j]
      x2 <- x
      x2[i] <- x2[i] + eps[i]
      x2[j] <- x2[j] - eps[j]
      x3 <- x
      x3[i] <- x3[i] - eps[i]
      x3[j] <- x3[j] + eps[j]
      x4 <- x
      x4[i] <- x4[i] - eps[i]
      x4[j] <- x4[j] - eps[j]
      m[i, j] <- (f(x1) - f(x2) - f(x3) + f(x4))/(4 * eps[i] * eps[j])
    }
  }
  solve(m)
}

"weibull.pp" <- function(a, dat) {
  # Weibull probability plot: empirical plotting positions i/n against the
  # fitted CDF evaluated at the sorted data.  a = c(shape, scale).
  # NOTE(review): uses i/n here but i/(n+1) in weibull.qq -- inconsistent
  # plotting positions, kept as in the original.
  plot((1:length(dat))/length(dat), pweibull(sort(dat), a[1], a[2]),
       xlab = "empirical", ylab = "model", main = "Probability plot")
  abline(0, 1, col = 4)
}

"weibull.qq" <- function(a, dat) {
  # Weibull quantile plot: sorted data against model quantiles at plotting
  # positions i/(n+1).  a = c(shape, scale).
  plot(sort(dat), qweibull((1:length(dat)/(length(dat) + 1)), a[1], a[2]),
       xlab = "empirical", ylab = "model", main = "Quantile Plot")
  abline(0, 1, col = 4)
}

"pareto.diag" <- function(z) {
  # Diagnostic plots for a fit object `z` produced by pareto.fit():
  # residual PP/QQ plots (Gumbel scale) when covariates were used
  # (z$trans TRUE), otherwise PP, QQ and density/histogram plots.
  n <- length(z$data)
  x <- (1:n)/(n + 1)
  if (z$trans) {
    par(mfrow = c(1, 2))
    plot(x, exp(-exp(-sort(z$data))), xlab = "empirical", ylab = "model")
    abline(0, 1, col = 4)
    title("Residual Probability Plot")
    plot(-log(-log(x)), sort(z$data), xlab = "empirical", ylab = "model")
    abline(0, 1, col = 4)
    title("Residual Quantile Plot (Gumbel Scale)")
  } else {
    par(mfrow = c(2, 2))
    pareto.pp(z$mle, z$data)
    pareto.qq(z$mle, z$data)
    pareto.his(z$mle, z$data)
  }
  par(mfrow = c(1, 1))
  invisible()
}

"pareto.fit" <- function(xdat, ydat = NULL, mul = NULL, sigl = NULL,
                         mulink = identity, siglink = identity) {
  # Maximum-likelihood fit of the Pareto model used by pareto.lik(),
  # optionally with the location/scale parameters modelled as linear
  # functions of covariate columns of `ydat` (`mul`/`sigl` give column
  # indices, `mulink`/`siglink` the link functions).
  # Returns (invisibly) a list z with $trans, $model, $link, $conv, $nllh,
  # $data, $mle, $se, $cov, $vals -- element ORDER matters, because the
  # print(z[c(...)]) calls below index the list by position.
  # NOTE(review): this is S-PLUS-era code (it used nlmin() and
  # assign(..., frame = 1), neither of which exists in R); ported here to
  # stats::optim() and assignments into .GlobalEnv.
  xdat <- as.numeric(na.omit(xdat))
  z <- list()
  z$trans <- FALSE
  # in1/in2 are the optimiser's starting values; change them if the
  # maximisation fails to converge.
  in2 <- 5
  in1 <- 1
  if (is.null(mul)) {
    mumat <- as.matrix(rep(1, length(xdat)))
    muinit <- in1
  } else {
    z$trans <- TRUE
    mumat <- cbind(rep(1, length(xdat)), ydat[, mul])
    muinit <- c(in1, rep(0, length(mul)))
  }
  if (is.null(sigl)) {
    sigmat <- as.matrix(rep(1, length(xdat)))
    siginit <- in2
  } else {
    z$trans <- TRUE
    sigmat <- cbind(rep(1, length(xdat)), ydat[, sigl])
    siginit <- c(in2, rep(0, length(sigl)))
  }
  z$model <- list(mul, sigl)
  z$link <- deparse(substitute(c(mulink, siglink)))
  init <- c(muinit, siginit)
  # pareto.lik() reads these objects by name, so they must be published
  # to the global environment (was assign(..., frame = 1) in S-PLUS).
  assign("xdat", xdat, envir = .GlobalEnv)
  assign("mumat", mumat, envir = .GlobalEnv)
  assign("mul", mul, envir = .GlobalEnv)
  assign("mulink", mulink, envir = .GlobalEnv)
  assign("sigmat", sigmat, envir = .GlobalEnv)
  assign("sigl", sigl, envir = .GlobalEnv)
  assign("siglink", siglink, envir = .GlobalEnv)
  # was: nlmin(pareto.lik, init, max.fcal = 200, max.iter = 150, print = 0)
  x <- optim(init, pareto.lik, control = list(maxit = 200))
  z$conv <- (x$convergence == 0)
  if (z$conv) {
    z$nllh <- pareto.lik(x$par)
    h <- hess(pareto.lik, x$par)
    z$data <- xdat
    if (z$trans) {
      # standardised residuals when covariates are present; mu/sc are the
      # fitted vectors published by pareto.lik()
      z$data <- (as.vector((xdat - mu)/sc))
    }
    z$mle <- x$par
    z$se <- sqrt(diag(h))
    z$cov <- h
    z$vals <- cbind(mu, sc)  # was cbind(mu, sc,) -- trailing empty argument
  }
  if (z$trans) print(z[c(2, 3, 4)]) else print(z[4])
  if (z$conv) {
    print(z[c(5, 7, 8)])
  }
  invisible(z)
}

"pareto.his" <- function(a, dat) {
  # Histogram of the data with the fitted Pareto density overlaid.
  # a = c(C, a) as in dpareto().
  # NOTE(review): ylim is capped at the density maximum, which can clip
  # histogram bars; kept as in the original.
  h <- hist(dat, prob = TRUE, plot = FALSE)
  x <- seq(min(dat), max(dat), length = 100)
  y <- dpareto(x, a[1], a[2])
  hist(dat, prob = TRUE, ylim = c(0, max(y)), xlab = "x", ylab = "f(x)",
       main = "Density Plot")
  lines(x, y, col = 4)
}

"pareto.lik" <- function(a) {
  # Negative log-likelihood of the Pareto model; support routine for
  # pareto.fit().  Reads xdat/mumat/sigmat/mul/sigl and the link functions
  # from the global environment (set up by pareto.fit) and publishes the
  # fitted mu/sc vectors back there for use by pareto.fit().
  npmu <- length(mul) + 1
  mu <- mulink(mumat %*% (a[1:npmu]))
  npsc <- length(sigl) + 1
  sc <- siglink(sigmat %*% (a[seq(npmu + 1, length = npsc)]))
  assign("mu", mu, envir = .GlobalEnv)  # was assign(..., frame = 1) (S-PLUS)
  assign("sc", sc, envir = .GlobalEnv)
  y <- xdat + mu
  # density is sc * mu^sc * (x + mu)^(-sc - 1), i.e. dpareto(x, C = mu, a = sc);
  # a huge penalty is returned for non-positive scale.
  if (min(sc) <= 0) l <- 10^10 else l <- -1 * sum(log(sc * (mu^sc) * (y^(-sc - 1))))
  l
}

"pareto.pp" <- function(a, dat) {
  # Pareto probability plot: empirical plotting positions i/n against the
  # fitted CDF at the sorted data.  a = c(C, a) as in ppareto().
  plot((1:length(dat))/length(dat), ppareto(sort(dat), a[1], a[2]),
       xlab = "empirical", ylab = "model", main = "Probability plot")
  abline(0, 1, col = 4)
}

"pareto.qq" <- function(a, dat) {
  # Pareto quantile plot: sorted data against model quantiles at plotting
  # positions i/(n+1).  a = c(C, a) as in qpareto().
  plot(sort(dat), qpareto((1:length(dat)/(length(dat) + 1)), a[1], a[2]),
       xlab = "empirical", ylab = "model", main = "Quantile Plot")
  abline(0, 1, col = 4)
}

"ppareto" <- function(x, C, a) {
  # CDF of the (shifted/Lomax-type) Pareto distribution: 1 - (C/(x+C))^a.
  1 - (C/(x + C))^a
}

"qpareto" <- function(x, C, a) {
  # Quantile function (inverse of ppareto) for probability x.
  C * (1 - x)^(-1/a) - C
}

"dpareto" <- function(x, C, a) {
  # Density of the same Pareto distribution: a * C^a * (x+C)^-(a+1).
  a * C^a * (x + C)^(-(a + 1))
}

lmoment <- function(data) {
  # Sample L-moments, column by column, of the data frame/matrix `data`.
  # Returns a matrix with L1, L2, T2 = L2/L1 (L-CV), L3, T3 = L3/L2
  # (L-skewness), L4 and T4 = L4/L2 (L-kurtosis), based on probability-
  # weighted moments with plotting position (rank - 0.35)/n.
  stor <- dim(data)
  ant <- stor[2]
  tkt <- names(data)
  moments <- matrix(nrow = ant, ncol = 7,
                    dimnames = list(tkt, c("L1", "L2", "T2", "L3", "T3", "L4", "T4")))
  for (i in 1:ant) {
    x <- na.omit(data[, i])
    rang <- rank(x)
    l <- length(x)
    beta0 <- sum(x)/l
    beta1 <- sum(x * (rang - 0.35)/l)/l
    beta2 <- sum(x * ((rang - 0.35)/l)^2)/l
    beta3 <- sum(x * ((rang - 0.35)/l)^3)/l
    moments[i, 1] <- beta0
    moments[i, 2] <- 2 * beta1 - beta0
    moments[i, 3] <- moments[i, 2]/moments[i, 1]
    moments[i, 4] <- 6 * beta2 - 6 * beta1 + beta0
    moments[i, 5] <- moments[i, 4]/moments[i, 2]
    moments[i, 6] <- 20 * beta3 - 30 * beta2 + 12 * beta1 - beta0
    moments[i, 7] <- moments[i, 6]/moments[i, 2]
  }
  return(moments)
}

lmom <- function(data) {
  # Single-series version of lmoment(): sample L-moments of the vector
  # `data`, returned as a 1-row matrix with the same columns.
  ant <- 1
  tkt <- names(data)
  moments <- matrix(nrow = ant, ncol = 7,
                    dimnames = list(tkt, c("L1", "L2", "T2", "L3", "T3", "L4", "T4")))
  x <- na.omit(data)
  i <- 1
  rang <- rank(x)
  l <- length(x)
  beta0 <- sum(x)/l
  beta1 <- sum(x * (rang - 0.35)/l)/l
  beta2 <- sum(x * ((rang - 0.35)/l)^2)/l
  beta3 <- sum(x * ((rang - 0.35)/l)^3)/l
  moments[i, 1] <- beta0
  moments[i, 2] <- 2 * beta1 - beta0
  moments[i, 3] <- moments[i, 2]/moments[i, 1]
  moments[i, 4] <- 6 * beta2 - 6 * beta1 + beta0
  moments[i, 5] <- moments[i, 4]/moments[i, 2]
  moments[i, 6] <- 20 * beta3 - 30 * beta2 + 12 * beta1 - beta0
  moments[i, 7] <- moments[i, 6]/moments[i, 2]
  return(moments)
}

mannk <- function(data) {
  # Mann-Kendall trend test applied to every column of `data`.
  # Returns a matrix with the lower/upper 95% normal critical values (a, b),
  # the large-sample test statistic Z and the sample size N per column.
  # NOTE(review): `names(data)` is NULL for a plain matrix; row labels only
  # appear when `data` is a data frame -- kept as in the original.
  stor <- dim(data)
  ant <- stor[2]
  tkt <- names(data)
  mk <- matrix(nrow = ant, ncol = 4, dimnames = list(tkt, c("a", "z", "b", "N")))
  for (i in 1:ant) {
    yy <- na.omit(data[, i])
    hh <- (mann.kendall(yy))
    mk[i, 1] <- hh$a
    mk[i, 2] <- hh$Z
    mk[i, 3] <- hh$b
    mk[i, 4] <- length(yy)
  }
  return(mk)
}

"Mann.kendall.vekt" <- function(x) {
  # Mann-Kendall score terms: S[i] = #(x[j] > x[i]) - #(x[j] < x[i]) over
  # j > i; the final element is 0 by convention.  (The original file defined
  # this function twice, identically; collapsed to one definition.)
  S <- NULL
  for (i in 1:(length(x) - 1)) {
    S[i] <- 0
    for (j in (i + 1):length(x)) {
      if (x[i] < x[j]) S[i] <- S[i] + 1 else {
        if (x[i] > x[j]) S[i] <- S[i] - 1
      }
    }
  }
  S[length(x)] <- 0
  S
}

"mann.kendall" <- function(x) {
  # Mann-Kendall trend test on the series `x`.  Returns a list with the
  # 2.5%/97.5% standard-normal critical values (a, b) and the continuity-
  # corrected large-sample statistic Z.
  # FIX(review): was `return(a, Z, b)` -- multi-argument return() is an
  # error in R; callers (mannk) read hh$a / hh$Z / hh$b.
  N <- length(x)
  y <- Mannkendallvekt(x)
  SS <- sum(y)
  # continuity correction: +1 for negative S, -1 for positive S
  if (SS < 0) m <- 1 else m <- -1
  Z <- (SS + m)/sqrt((N * (N - 1) * (2 * N + 5)/18))
  a <- qnorm(0.025, 0, 1)
  b <- qnorm(0.975, 0, 1)
  return(list(a = a, Z = Z, b = b))
}

"mannkendall" <- function(x) {
  # Verbose Mann-Kendall trend test: prints the sample size, Kendall's S,
  # the large-sample Z statistic, the 95% critical values and the test
  # decision (H0: no trend).  Warns when N <= 10, where the normal
  # approximation does not apply.
  N <- length(x)
  y <- Mannkendallvekt(x)
  SS <- sum(y)
  if (SS < 0) m <- 1 else m <- -1
  Z <- (SS + m)/sqrt((N * (N - 1) * (2 * N + 5)/18))
  a <- qnorm(0.025, 0, 1)
  b <- qnorm(0.975, 0, 1)
  cat(" \n")
  cat(" H0: No trend in data\n\n")
  cat(" Sample size (n) :", format(N), "\n\n")
  cat(" Kendalls test statistics (S) :", format(SS), "\n\n")
  cat(" Large sample approx. (Z) :", format(Z), "\n")
  cat(" Z(0.025) :", format(a), "\n")
  # FIX(review): label said Z(0.095) although b = qnorm(0.975)
  cat(" Z(0.975) :", format(b), "\n\n")
  if (Z > a & Z < b) cat(" Failed to reject H0 \n\n") else cat(" H0 rejected \n\n")
  if (N <= 10) {
    cat(" Small sample size!\n")
    cat(" Get exact critical value from table of p-values for Kendalls Tau,\n")
    cat(" large sample approximation does not apply. \n\n")
  } else cat("\n")
}

"Mannkendallvekt" <- function(x) {
  # Vectorised Mann-Kendall score terms: S[i] = sum of sign(x[j] - x[i])
  # over j > i; last element 0 by convention.  NAs in the tail are dropped.
  # FIX(review): was x[i+1:length(x)], which by precedence is
  # x[i + (1:length(x))] and only worked because the out-of-range NA
  # indices were removed again by na.omit().
  S <- NULL
  for (i in 1:(length(x) - 1)) {
    S[i] <- 0
    tx <- na.omit(x[(i + 1):length(x)]) - x[i]
    S[i] <- sum(sign(tx))
  }
  S[length(x)] <- 0
  S
}

see_geor <- function(data, coords = c(1, 2), dc = 3) {
  # Plot each data column (from column `dc` on) of `data` as a geoR
  # geodata object; negative values are treated as missing.
  # Requires the geoR package (as.geodata, plot.geodata).
  # FIX(review): was as.geodata(data, coords.col <- coords, data.col <- i),
  # which assigned accidental globals and only worked positionally.
  nc <- ncol(data)
  par(ask = TRUE)
  data[data < 0] <- NA
  for (i in dc:nc) {
    event <- as.geodata(data, coords.col = coords, data.col = i)
    plot(event)
  }
  par(ask = FALSE)
}

see_points <- function(data, coords = c(1, 2), dc = 3) {
  # Point maps of each data column (from column `dc` on) of `data`;
  # negative values are treated as missing.  Requires the geoR package.
  nc <- ncol(data)
  data[data < 0] <- NA
  par(ask = TRUE)
  for (i in dc:nc) {
    event <- as.geodata(data, coords.col = coords, data.col = i)
    points(event, xlab = "Coord X", ylab = "Coord Y")
  }
  par(ask = FALSE)
}

see_empirical_variogram <- function(data, coords = c(1, 2), dc = 3, normalize = FALSE) {
  # Empirical semivariograms (cloud, binned, binned box plot) for each data
  # column of `data`, optionally after scaling each event to unit standard
  # deviation.  Max lag distance = the larger of the x/y coordinate spans.
  # Requires the geoR package (variog).
  nc <- ncol(data)
  for (i in dc:nc) {
    event <- as.geodata(data, coords.col = coords, data.col = i)
    if (normalize) {
      event$data <- event$data/sqrt(var(event$data))
    }
    xdist <- max(event$coords[, 1]) - min(event$coords[, 1])
    ydist <- max(event$coords[, 2]) - min(event$coords[, 2])
    maxd <- max(xdist, ydist)
    cloud1 <- variog(event, option = "cloud")
    # modulus-estimator variants are computed but only plotted if the
    # commented lines below are re-enabled
    cloud2 <- variog(event, option = "cloud", estimator.type = "modulus", max.dist = maxd)
    bin1 <- variog(event, max.dist = maxd)
    bin2 <- variog(event, estimator.type = "modulus", max.dist = maxd)
    bin1box <- variog(event, bin.cloud = TRUE, max.dist = maxd)
    bin2box <- variog(event, estimator.type = "modulus", bin.cloud = TRUE, max.dist = maxd)
    par(mfrow = c(2, 2), ask = TRUE)
    plot(cloud1)
    plot(bin1box, bin.cloud = TRUE)
    #plot(cloud2, main = "modulus estimator")
    plot(bin1)
    #plot(bin2, main = "modulus estimator")
    par(mfrow = c(1, 1), ask = FALSE)
  }
}

see_empirical_box_variogram <- function(data, coords = c(1, 2), dc = 3, normalize = FALSE) {
  # Binned box-plot semivariograms (classical estimator) for each data
  # column of `data`.  Requires the geoR package.
  nc <- ncol(data)
  for (i in dc:nc) {
    event <- as.geodata(data, coords.col = coords, data.col = i)
    if (normalize) {
      event$data <- event$data/sqrt(var(event$data))
    }
    xdist <- max(event$coords[, 1]) - min(event$coords[, 1])
    ydist <- max(event$coords[, 2]) - min(event$coords[, 2])
    maxd <- max(xdist, ydist)
    bin1 <- variog(event, bin.cloud = TRUE)
    bin2 <- variog(event, estimator.type = "modulus", bin.cloud = TRUE)
    # 28 Aug 2008, M. Morawietz: plot only the results for the classical estimator
    par(ask = TRUE)
    plot(bin1, bin.cloud = TRUE)
    #plot(bin2, bin.cloud = T, main = "modulus estimator")
    par(ask = FALSE)
  }
}

see_empirical_directional_variogram <- function(data, coords = c(1, 2), dc = 3,
                                                normalize = FALSE, ...) {
  # Directional (4-direction) empirical semivariograms for each data column
  # of `data`; `...` is forwarded to geoR::variog4.  Max lag = 80% of the
  # larger coordinate span.
  nc <- ncol(data)
  for (i in dc:nc) {
    event <- as.geodata(data, coords.col = coords, data.col = i)
    if (normalize) {
      event$data <- event$data/sqrt(var(event$data))
    }
    xdist <- max(event$coords[, 1]) - min(event$coords[, 1])
    ydist <- max(event$coords[, 2]) - min(event$coords[, 2])
    maxd <- max(xdist, ydist) * 0.8
    vario.4 <- variog4(event, max.dist = maxd, tolerance = pi/2, ...)
    par(mfrow = c(1, 1), ask = TRUE)
    plot(vario.4, main = "Directional variogram")
    par(mfrow = c(1, 1), ask = FALSE)
  }
}

average_empirical_variogram <- function(data, coords = c(1, 2), dc = 3,
                                        normalize = TRUE, ...) {
  # Semivariogram averaged over all events (data columns dc..nc), each event
  # optionally scaled to unit standard deviation first.  Plots the averaged
  # classical-estimator variogram and returns both the classical (bin1a) and
  # modulus (bin2a) averages in a list.  Requires the geoR package.
  nc <- ncol(data)
  nevents <- nc - dc + 1
  for (i in dc:nc) {
    event <- as.geodata(data, coords.col = coords, data.col = i)
    if (normalize) {
      event$data <- event$data/sqrt(var(event$data))
    }
    xdist <- max(event$coords[, 1]) - min(event$coords[, 1])
    ydist <- max(event$coords[, 2]) - min(event$coords[, 2])
    maxd <- max(xdist, ydist) * 0.8
    bin1 <- variog(event, max.dist = maxd)
    bin2 <- variog(event, estimator.type = "modulus", max.dist = maxd)
    if (i == dc) {
      # first event initialises the running averages
      bin1a <- bin1
      bin2a <- bin2
      bin1a$v <- bin1a$v/nevents
      bin2a$v <- bin2a$v/nevents
    } else {
      bin1a$v <- bin1a$v + bin1$v/nevents
      bin2a$v <- bin2a$v + bin2$v/nevents
    }
  }
  # 28 Aug 2008, Martin Morawietz: plot only the classical estimator
  #par(mfrow = c(1, 2),ask=T)
  plot(bin1a, main = "Average Semivariogram")
  #plot(bin2a, main = "modulus estimator")
  #par(mfrow=c(1,1),ask=F)
  out <- list()
  out$bin1a <- bin1a
  out$bin2a <- bin2a
  out
}

average_empirical_directional_variogram <- function(data, coords = c(1, 2), dc = 3,
                                                    normalize = TRUE, ...) {
  # Directional semivariograms averaged over all events (columns dc..nc);
  # the first four list elements of a variog4 object are the four
  # directions, the fifth the omnidirectional variogram.  Plots and
  # returns the averaged object.  Requires the geoR package.
  nc <- ncol(data)
  nevents <- nc - dc + 1
  for (i in dc:nc) {
    event <- as.geodata(data, coords.col = coords, data.col = i)
    if (normalize) {
      event$data <- event$data/sqrt(var(event$data))
    }
    xdist <- max(event$coords[, 1]) - min(event$coords[, 1])
    ydist <- max(event$coords[, 2]) - min(event$coords[, 2])
    maxd <- max(xdist, ydist) * 0.8
    # tolerance set to pi/2 instead of pi/8, 19 Aug 2010
    vario.4 <- variog4(event, max.dist = maxd, tolerance = pi/2, ...)
    if (i == dc) {
      vario.4a <- vario.4
      for (k in 1:5) vario.4a[[k]]$v <- vario.4a[[k]]$v/nevents
    } else {
      for (k in 1:5) vario.4a[[k]]$v <- vario.4a[[k]]$v + vario.4[[k]]$v/nevents
    }
  }
  par(mfrow = c(1, 1), ask = TRUE)
  plot(vario.4a, main = "Directional average semivariance")
  par(mfrow = c(1, 1), ask = FALSE)
  vario.4a
}

q.form <- function(d, m) {
  # Quadratic form d' M d for a vector d and conformable matrix m.
  t(as.matrix(d)) %*% m %*% as.matrix(d)
}

gev.dens <- function(a, z) {
  # GEV density at z with parameters a = c(location, scale, shape);
  # falls back to the Gumbel density when the shape parameter is 0.
  if (a[3] != 0) {
    (exp(-(1 + (a[3] * (z - a[1]))/a[2])^(-1/a[3])) *
       (1 + (a[3] * (z - a[1]))/a[2])^(-1/a[3] - 1))/a[2]
  } else {
    gum.dens(c(a[1], a[2]), z)
  }
}

gum.dens <- function(a, x) {
  # Gumbel density at x with parameters a = c(location, scale).
  y <- (x - a[1])/a[2]
  (exp(-y) * exp(-exp(-y)))/a[2]
}

chi.square.test <- function(x, pdist, nbin, ...) {
  # Chi-square goodness-of-fit of the sample `x` against the distribution
  # whose CDF is named by `pdist` (e.g. "pnorm"); `...` are the CDF's
  # parameters.  The range of x is split into `nbin` classes (open-ended
  # first and last class) and observed vs. theoretical class probabilities
  # are compared with chisq.test().
  # NOTE(review): chisq.test() is given the observed PROPORTIONS rather
  # than counts, so its p-value is not on the usual scale -- kept as in
  # the original; confirm intent before relying on out$test.
  out <- list()
  out$pdist <- pdist
  pdist <- match.fun(pdist)
  nobs <- length(x)
  minv <- min(x)
  maxv <- max(x)
  int <- (maxv - minv)/(nbin - 1)
  emp <- numeric(nbin)
  theo <- numeric(nbin)
  bin.min <- numeric(nbin)
  bin.max <- numeric(nbin)
  bin.min[1] <- NaN          # first class is open to the left
  mm <- minv + 0.5 * int
  bin.max[1] <- mm
  theo[1] <- pdist(mm, ...)
  emp[1] <- length(x[x < mm])
  ll <- nbin - 1
  for (i in 2:ll) {
    nn <- mm
    bin.min[i] <- nn
    mm <- mm + int
    bin.max[i] <- mm
    theo[i] <- pdist(mm, ...) - pdist(nn, ...)
    emp[i] <- length(x[x < mm]) - length(x[x < nn])
  }
  bin.min[nbin] <- mm
  bin.max[nbin] <- NaN       # last class is open to the right
  theo[nbin] <- 1 - pdist(mm, ...)
  emp[nbin] <- length(x[x >= mm])
  emp <- emp/nobs            # convert counts to relative frequencies
  out$nbin <- nbin
  out$bin.min <- bin.min
  out$bin.max <- bin.max
  out$emp <- emp
  out$theo <- theo
  out$test <- chisq.test(x = emp, p = theo)
  out
}