diff --git a/.RData b/.RData index 2cf2d70..5bc9d9d 100644 Binary files a/.RData and b/.RData differ diff --git a/.Rbuildignore b/.Rbuildignore index 5eb8c20..02b6fed 100644 --- a/.Rbuildignore +++ b/.Rbuildignore @@ -2,3 +2,5 @@ ^\.Rproj\.user$ ^data-raw$ ^\.travis\.yml$ +^cran-comments\.md$ +^CRAN-SUBMISSION$ diff --git a/.Rhistory b/.Rhistory index eabe56c..c8f945e 100644 --- a/.Rhistory +++ b/.Rhistory @@ -1,512 +1,512 @@ -if (!is.null(message_out)) { -if (grepl('Version', message_out)) { -packageStartupMessage(message_out) -} -} -} -devtools::document() +debug(hindcast) +debug(preprocessHindcast) +frc_new4 <- biasCorrect(frc, hindcast, obs, method = 'gqm', preci = TRUE) +frc_new4 <- biasCorrect(frc, hindcast, obs, method = 'gqm', preci = TRUE) +debug(biasCorrect_core_gqm) +frc_new4 <- biasCorrect(frc, hindcast, obs, method = 'gqm', preci = TRUE) devtools::check() +filePath <- system.file("extdata", "tnc.nc", package = "hyfo") +varname <- getNcdfVar(filePath) +nc <- loadNcdf(filePath, varname) +data(tgridData) +newFrc <- biasCorrect(nc, nc, tgridData, method = 'gqm', preci = TRUE) +obsGamma <- fitdistr(obs[ind],"gamma", lower = c(0, 0)) +obsGamma <- fitdistr(obs[ind],"gamma") +obsGamma <- fitdistr(obs[ind],"gamma", lower = c(-4, -5)) +?optim +newFrc <- biasCorrect(nc, nc, tgridData, method = 'gqm', preci = TRUE) +debug(optim) +newFrc <- biasCorrect(nc, nc, tgridData, method = 'gqm', preci = TRUE) +method +trace("optim") +newFrc <- biasCorrect(nc, nc, tgridData, method = 'gqm', preci = TRUE) +trace("optim") +traceback("optim") +traceback("optim") +debug(fitdistr) +newFrc <- biasCorrect(nc, nc, tgridData, method = 'gqm', preci = TRUE) +start +newFrc <- biasCorrect(nc, nc, tgridData, method = 'gqm', preci = TRUE) +obsGamma <- fitdistr(obs[ind],"gamma") +start +method +newFrc <- biasCorrect(nc, nc, tgridData, method = 'gqm', preci = TRUE) +length(NULL) +start +method +newFrc <- biasCorrect(nc, nc, tgridData, method = 'gqm', preci = TRUE) +obsGamma <- 
fitdistr(obs[ind],"gamma") +start +method +method +lower +length(lower) +length(lower) > 1L +npar +par +con +lower +upper +rep_len(upper) +npar +rep_len(upper, npar) +parm +newFrc <- biasCorrect(nc, nc, tgridData, method = 'gqm', preci = TRUE) +fn +fn() +?fn +??fn +newFrc <- biasCorrect(nc, nc, tgridData, method = 'gqm', preci = TRUE) +obsGamma <- fitdistr(obs[ind],"gamma", lower = c(0, 0), upper = c(1000, 0)) +devtools::check() +devtools::check() +devtools::check() +DT1 = data.table(A=1:3,B=letters[1:3]) +DT2 = data.table(B=letters[4:5],A=4:5) +l = list(DT1,DT2) +rbindlist(l, use.names=TRUE) +install.packages('data.table') +library(data.table) +DT1 = data.table(A=1:3,B=letters[1:3]) +DT2 = data.table(B=letters[4:5],A=4:5) +l = list(DT1,DT2) +rbindlist(l, use.names=TRUE) +DT1 +DT2 devtools::document() -devtools::install_github('Yuanchao-Xu/hyfo') -library(hyfo) +devtools::check() devtools::document() -devtools::document +devtools::check() +devtools::check() +devtools::check() +plotTS(testdl[[1]]) +plotTS(testdl[[1]], x = 'xxx', y = 'yyy', title = 'aaa') +plotTS(list = testdl) +a1 <- plotTS(testdl[[1]], output = 'ggplot', name = 1) +a2 <- plotTS(testdl[[2]], output = 'ggplot', name = 2) +plotTS_comb(a1, a2) +plotTS_comb(list = list(a1, a2), y = 'y axis', nrow = 2) +debug(plotTS_comb) +plotTS_comb(list = list(a1, a2), y = 'y axis', nrow = 2) +class(data_ggplot) +devtools::check() +a1 <- plotTS(testdl[[1]], output = 'ggplot', name = 1) +a2 <- plotTS(testdl[[2]], output = 'ggplot', name = 2) +plotTS_comb(a1, a2) +debug(plotTS_comb) +plotTS_comb(a1, a2) +class(data_ggplot) +class(data_ggplot) +class(data_ggplot) == c('data.table', 'data.frame') +data_ggplot$name +devtools::check() +a1 <- plotTS(testdl[[1]], output = 'ggplot', name = 1) +a2 <- plotTS(testdl[[2]], output = 'ggplot', name = 2) +plotTS_comb(a1, a2) +plotTS_comb(list = list(a1, a2), y = 'y axis', nrow = 2) +devtools::check() +trace("extractPeriod", browser, exit=browser, signature = c("list")) +AAA <- 
data.frame( +# date column +Date = seq(as.Date('1990-10-28'),as.Date('1997-4-1'),1), +# value column +AAA = sample(1:100,length(seq(as.Date('1990-10-28'),as.Date('1997-4-1'),1)), repl = TRUE)) +BBB <- data.frame( +Date = seq(as.Date('1993-3-28'),as.Date('1999-1-1'),1), +BBB = sample(1:100,length(seq(as.Date('1993-3-28'),as.Date('1999-1-1'),1)), repl = TRUE)) +CCC <- data.frame( +Date = seq(as.Date('1988-2-2'),as.Date('1996-1-1'),1), +CCC = sample(1:100,length(seq(as.Date('1988-2-2'),as.Date('1996-1-1'),1)), repl = TRUE)) +list <- list(AAA, BBB, CCC)# dput() and dget() can be used to save and load list file. +list_com <- extractPeriod(list, commonPeriod = TRUE) +Dates +Dates <- rbindlist(Dates) +do.call('rbind', Dates) +Dates[1] +Dates[[1]] +rbindlist(Dates) +is.list(Dates[1]) +debug(rbindlist) +rbindlist(Dates) +Dates[[1]] +Dates +datalist +str(datalist) +str(Dates) +rbindlist(datalist) devtools::document() devtools::check() -install.packages('devtools') -devtools::install_github('Yuanchao-Xu/hyfo') +debug(collectData_csv_anarbe) +file <- system.file("extdata", "1999.csv", package = "hyfo") +folder <- strsplit(file, '1999')[[1]][1] +a <- collectData_csv_anarbe(folder) +data +data <- rbindlist(data) +undebug(rbindlist) +data <- rbindlist(data) +do.call('rbind', data) +fileNames +str(data) +data <- lapply(fileNames, readColumn_csv_anarbe) +str(data) +rbindlist(data) +fileNames <- list.files(folderName, pattern='*.csv', full.names = TRUE) +data <- lapply(fileNames, readColumn_csv_anarbe) +rbindlist(data) +a <- collectData_csv_anarbe(folder) +do.call('rbind', data) +data <- do.call('rbind', data) +data <- data[, 1:2] +data +data[,1] +data <- rbindlist(data) +fileNames <- list.files(folderName, pattern='*.csv', full.names = TRUE) +data <- lapply(fileNames, readColumn_csv_anarbe) +data <- rbindlist(data) +data +data <- data[, 1:2] +data +data[, 1] +as.Date(data[, 1], format = '%d/%m/%Y') +str(data[,1]) +fileNames <- list.files(folderName, pattern='*.csv', full.names = 
TRUE) +data <- lapply(fileNames, readColumn_csv_anarbe) +data <- do.call('rbind', data) +data <- data[, 1:2] +data +data[, 1] +str(data[, 1]) +a <- data[, 1] +a +fileNames <- list.files(folderName, pattern='*.csv', full.names = TRUE) +data <- lapply(fileNames, readColumn_csv_anarbe) +data <- rbindlist(data) +data <- data[, 1:2] +b <- data[, 1] +b +str(b) +str(a) +as.Date(data[, 1], format = '%d/%m/%Y') +as.Date(data[1, 1], format = '%d/%m/%Y') +data[1, 1] +str(data[1, 1]) +as.Date(data[1, 1]) +data[1,] +data[1,1] +data[1,1] = '02/01/1999' +data[1,1] == '02/01/1999' +data[2,1] +data[2,1] == '03/01/1999' +data[,1] == '03/01/1999' +data[,1] +as.Date('03/01/1999') +data[, 1] +data[2, 1] +str(data[2, 1]) +data[[2]] +data[,2] +devtools::check() +debug(collectData_csv_anarbe) +a <- collectData_csv_anarbe(folder) +file <- system.file("extdata", "1999.csv", package = "hyfo") +folder <- strsplit(file, '1999')[[1]][1] +a <- collectData_csv_anarbe(folder) +devtools::check() +debug(collectData_csv_anarbe) +file <- system.file("extdata", "1999.csv", package = "hyfo") +folder <- strsplit(file, '1999')[[1]][1] +a <- collectData_csv_anarbe(folder) +data +data[, 1] <- as.Date(data[[1]], format = '%d/%m/%Y') +data +fileNames <- list.files(folderName, pattern='*.csv', full.names = TRUE) +data <- lapply(fileNames, readColumn_csv_anarbe) +data <- rbindlist(data) +data <- data[, 1:2] +data[[1]] <- as.Date(data[[1]], format = '%d/%m/%Y') +data +debug(fillGap_lmCoef) +b <- read.table(text = ' Date AAA BBB CCC DDD EEE +49 1999-12-15 24.8 21.4 25.6 35.0 17.4 +50 1999-12-16 NA 0.6 1.5 6.3 2.5 +51 1999-12-17 NA 16.3 20.3 NA 19.2 +52 1999-12-18 13 1.6 NA 6.3 0.0 +53 1999-12-19 10 36.4 12.5 26.8 24.9 +54 1999-12-20 NA 0.0 0.0 0.2 0.0 +55 1999-12-21 0.2 0.0 0.0 0.0 0.0 +56 1999-12-22 0.0 0.0 0.0 0.0 0.0') +b1 <- fillGap(b) +lmCoef +rbindlist(lmCoef) +l <- list(c("16:59:20", "100", "143.88"), c("16:59:05", "106", "143.90"), +c("16:58:49", "900", "143.92"), c("16:58:49", "100", "143.92"), 
+c("16:58:46", "100", "143.93"), c("16:58:46", "200", "143.93")) +transpose(l) +rbindlist(transpose(lmCoef)) +transpose(lmCoef) +lmCoef +rbindlist(l) +rbindlist(transpose(l)) +setDT(l) +l +t(setDT(l)) +setDT(t(lmCoef)) +lmCoef +l <- dim(data)[2] +m <- diag(l)# m is the coeficients matrix +m[lower.tri(m)] <- combn(data, 2, function(x) coef(lm(x[, 2] ~ x[, 1] + 0))) +tm <- t(m) +tm[lower.tri(tm)] <- combn(data, 2, function(x) coef(lm(x[, 1] ~ x[, 2] + 0))) +m <- t(tm) +lmCoef <- lapply(1 : l, function(x) m[x,corOrder[, x]]) +lmCoef +a <- setDT(t(lmCoef)) +a +a <- setDT(lmCoef) +a +lmCoef +l <- dim(data)[2] +m <- diag(l)# m is the coeficients matrix +m[lower.tri(m)] <- combn(data, 2, function(x) coef(lm(x[, 2] ~ x[, 1] + 0))) +tm <- t(m) +tm[lower.tri(tm)] <- combn(data, 2, function(x) coef(lm(x[, 1] ~ x[, 2] + 0))) +m <- t(tm) +lmCoef <- lapply(1 : l, function(x) m[x,corOrder[, x]]) +a <- setDT(lmCoef) +a +lmCoef +l <- dim(data)[2] +m <- diag(l)# m is the coeficients matrix +m[lower.tri(m)] <- combn(data, 2, function(x) coef(lm(x[, 2] ~ x[, 1] + 0))) +tm <- t(m) +tm[lower.tri(tm)] <- combn(data, 2, function(x) coef(lm(x[, 1] ~ x[, 2] + 0))) +m <- t(tm) +lmCoef <- lapply(1 : l, function(x) m[x,corOrder[, x]]) +lmCoef +t(setDT(lmCoef)) +l <- dim(data)[2] +m <- diag(l)# m is the coeficients matrix +m[lower.tri(m)] <- combn(data, 2, function(x) coef(lm(x[, 2] ~ x[, 1] + 0))) +tm <- t(m) +tm[lower.tri(tm)] <- combn(data, 2, function(x) coef(lm(x[, 1] ~ x[, 2] + 0))) +m <- t(tm) +lmCoef <- lapply(1 : l, function(x) m[x,corOrder[, x]]) +lmCoef +lmCoef[[1]] +lmCoef[[1]] <- rbind(lmCoef[[1]], lmCoef[[2]]) +lmCoef +rbindlist(lmCoef) +do.call('rbind', lmCoef) +lmCoef[[2]] <- rbind(lmCoef[[3]], lmCoef[[4]]) +lmCoef +a <- lmCoef[1:2] +a +rbindlist(a) +rbindlist(list = list(a[[1]], b[[2]])) +rbindlist(list(a[[1]], b[[2]])) +?rbindlist +a <- list(a[[1]], b[[2]]) +a +b <- list(a[[1]], a[[2]]) +b +a[[2]] +a +devtools::check() +devtools::build() +rm(a) +rm(b) +rm(filePath) +rm(name) 
+rm(nc) +rm(varname) +rm(writePath) +devtools::document() +devtools::check(cran = T, manual manual = T) +devtools::check(cran = T, manual = T) +rm(a) +rm9b +rm(b) +rm(filePath) +rm(name) +rm(nc) +rm(varname) +rm(writePath) +devtools::check(as.cran= T) +?devtools::check +devtools::check(cran= T) +?devtools::build() +devtools::build() +dvetools:check(cran = T) +devtools:check(cran = T) library(devtools) -install_github('Yuanchao-Xu/hyfo') +devtools::check(cran = T) +devtools::build() +devtools::build() +install.packages('knitr') +devtools::build() library(hyfo) -?getPreciBar -b1 <- getPreciBar(tgridData, method = 'annual') -b2 <- getPreciBar(tgridData, method = 'meanMonthly') -b1 <- getPreciBar(tgridData, method = 'annual') -?getBiasFactor -devtools::document() -devtools::check() -?getPreciBar -debug(getBiasFactor) -?getBiasFactor +?writeNcdf filePath <- system.file("extdata", "tnc.nc", package = "hyfo") varname <- getNcdfVar(filePath) +varname nc <- loadNcdf(filePath, varname) -biasFactor <- getBiasFactor(nc, tgridData) -S -str(biasFactor) -biasFactor <- getBiasFactor(nc, tgridData) -debug(standardGeneric) -devtools::document() -devtools::check() -devtools::install_github('Yuanchao-Xu/hyfo') -devtools::install_github('Yuanchao-Xu/hyfo') -devtools::install_github('Yuanchao-Xu/hyfo') -devtools::install_github('Yuanchao-Xu/hyfo') -library(devtools) -devtools::install_github('Yuanchao-Xu/hyfo') -devtools::install_github('Yuanchao-Xu/hyfo') -install_github('Yuanchao-Xu/hyfo') -install_github('Yuanchao-Xu/hyfo') -install_github('Yuanchao-Xu/hyfo') -devtools::install_github('Yuanchao-Xu/hyfo') -devtools::install_github('Yuanchao-Xu/hyfo') -devtools::install_github('Yuanchao-Xu/hyfo') +writeNcdf(nc, 'test.nc') +debug(writeNcdf) +writeNcdf(nc, 'test.nc') +writeNcdf(nc, 'test.nc') +debug(getTimeUnit) +dates +units() +units +time +dates +dates[1] +rem +devtools::build() +file <- system.file("extdata", "testCat.shp", package = "hyfo") +cat <- shp2cat(file) +library(hyfo) +file 
<- system.file("extdata", "testCat.shp", package = "hyfo") +cat <- shp2cat(file) +sp::rebuild_CRS(cat) +install.packages('sp') +install.packages("sp") +install.packages('sp') +file <- system.file("extdata", "testCat.shp", package = "hyfo") +cat <- shp2cat(file) +library(hyfo) +file <- system.file("extdata", "testCat.shp", package = "hyfo") +cat <- shp2cat(file) +sp::rebuild_CRS(cat) +a <- sp::rebuild_CRS(cat) +a +getSpatialMap(tgridData, method = 'meanAnnual', catchment = a) +save(a) +save(a, file.choose()) +save(file.choose(), a) +file<-file.choose() +file<-choose.dir() +save(a, file) +file +save(file, a) +saveRDS(a, file) +saveRDS(a, choose.dir()) +saveRDS(a, choose.dir()) +saveRDS(a, 'testCat.shp') +saveRDS(a, file.choose(new = T)) library(hyfo) -?getBiasFactor -?getPreciBar +file <- system.file("extdata", "testCat.shp", package = "hyfo") +cat <- shp2cat(file) +cat +a +a == cat +identical(a, cat) devtools::build() -devtools::document() devtools::check() -devtools::build() -??readData_folder -?hyfo::readData_folder -devtools::use_travis() +file <- system.file("extdata", "testCat.shp", package = "hyfo") +cat <- shp2cat(file) +library(hyfo) +file <- system.file("extdata", "testCat.shp", package = "hyfo") +cat <- shp2cat(file) +file <- system.file("extdata", "testCat.rda", package = "hyfo") +cat <- shp2cat(file) +file <- system.file("extdata", "testCat.shp", package = "hyfo") +cat <- shp2cat(file) devtools::check() devtools::check(cran = T) -?check -devtools::check(cran = T) devtools::build() -Sys.getenv("R_PROFILE_USER") -?install -??install -devtools::use_vignette("my-vignette") -devtools::check(cran = T) -devtools::check(cran = T) -?tools +devtools::document() devtools::check(cran = T) +devtools::built() devtools::build() -devtools::check(cran = T) devtools::build() -devtools::check(cran = T) devtools::build() -devtools::check(cran = T) devtools::build() -devtools::check(cran = T) devtools::build() -devtools::check(cran = T) +devtools::check(remote = T, cran = T) 
+devtools::check_win_devel() devtools::build() -a <- readLines('https://cran.r-project.org/web/packages/hyfo/NEWS') -a -devtools::check(cran = T) +devtools::check_win_devel() +devtools::check_win_devel() devtools::build() devtools::build() +devtools::check_win_devel() +install.packages("sf") +devtools::check_win_devel() +install.packages("sf") +install.packages("sf") +devtools::check_win_devel() +devtools::check_win_devel() +devtools::check_win_devel() +devtools::check_win_devel() devtools::build() -devtools::document() -devtools::check() -devtools::document() -devtools::check(cran = T) -install.packages('ncdf4') -library(ncdf4) -?open.ncdf -library(ncdf) -?open.ncdf -?nc_open -filePath <- file.choose() -nc <- nc_open(filePath) -names <- names(nc$var) -str(nc) -names <- names(nc$var) -names -citation(ncdf4) -citation('ncdf4') -name -names -varname <- names -nc <- nc_open(filePath) -str(nc) -varname -call_1 <- as.call(c( -list(as.name('$'), var, varname) -)) -var <- eval(call_1) -var <- nc$var -var <- eval(call_1) -call_1 <- as.call(c( -list(as.name('$'), var, varname) -)) -var <- eval(call_1) -if(is.null(var)) stop('No such variable name, check source file.') -var -str(var) -nc_data <- ncvar_get(nc, var) -str(nc_data) -a <- nc_data -nc_data <- get.var.ncdf(nc,var) -dimNames <- unlist(lapply(1:length(var$dim), function(x) var$dim[[x]]$name)) -dimIndex <- match(c('lon', 'lat', 'time', 'member'), dimNames) -gridData <- list() -gridData$Variable$varName <- varname -gridData$xyCoords$x <- var$dim[[dimIndex[1]]]$vals -gridData$xyCoords$y <- var$dim[[dimIndex[2]]]$vals -timeUnit <- strsplit(var$dim[[dimIndex[3]]]$units, split = ' since')[[1]][1] -timeDiff <- var$dim[[dimIndex[3]]]$vals -# To get real time, time since when has to be grabbed from the dataset. 
-timeSince <- as.POSIXlt(strsplit(var$dim[[dimIndex[3]]]$units, split = 'since')[[1]][2], tz = tz) -timeSince <- as.POSIXlt(strsplit(var$dim[[dimIndex[3]]]$units, split = 'since')[[1]][2], tz = tz) -tz = 'GMT' -timeSince <- as.POSIXlt(strsplit(var$dim[[dimIndex[3]]]$units, split = 'since')[[1]][2], tz = tz) -unitDic <- data.frame(weeks = 'weeks', days = 'days', hours = 'hours', -minutes = 'mins', seconds = 'secs') -timeDiff <- as.difftime(timeDiff, units = as.character(unitDic[1, timeUnit])) -Date <- timeSince + timeDiff -if (length(Date) == 1) { -warning("Only one time step is taken, time dimension is dropped in the original data. -But after loading, the time dimension (with length : 1) will be added.") -} -gridData$Dates$start <- as.character(Date) -if (length(dim(nc_data)) < 3) { -dim(nc_data) <- c(dim(nc_data), 1) -message('Time dimension is added, make sure in your original data, only time dimension was dropped.') -} -gridData$Data <- nc_data -attributes(gridData$Data)$dimensions <- dimNames -if (!is.na(dimIndex[4])) gridData$Members <- var$dim[[dimIndex[4]]]$vals -gridData$Loaded <- 'by hyfo package, http://yuanchao-xu.github.io/hyfo/' -nc_close(nc) -downscaleNcdf <- function(gridData, year = NULL, month = NULL, lon = NULL, lat = NULL) { -if (!is.null(year)) { -Dates <- as.POSIXlt(gridData$Dates$start) -yearIndex <- Dates$year + 1900 -monIndex <- Dates$mon + 1 -timeDim <- match('time', attributes(gridData$Data)$dimensions) -if (is.null(month) || !any(sort(month) != month)) { -targetYearIndex <- which(yearIndex %in% year) -if (length(targetYearIndex) == 0) stop('No input years in the input ts, check your input.') -# if year crossing than sort(month) != month -} else { -startIndex <- intersect(which(yearIndex == year[1] - 1), which(monIndex == month[1]))[1] -endIndex <- tail(intersect(which(yearIndex == tail(year, 1)), which(monIndex == tail(month, 1))), 1) -if (is.na(startIndex) || length(endIndex) == 0 || startIndex > endIndex) { -stop('Cannot find input 
months and input years in the input time series.') -} else { -targetYearIndex <- startIndex:endIndex -if (any(diff(year) != 1)) { -# if year is not continuous, like 1999, 2003, 2005, than we have to sift again. -# Only for special cases. -Dates <- Dates[targetYearIndex] -yea <- Dates$year + 1900 -mon <- Dates$mon + 1 -DateIndex <- unlist(sapply(year, function(x) { -startIndex <- intersect(which(yea == x - 1), which(mon == month[1]))[1] -endIndex <- tail(intersect(which(yea == x), which(mon == tail(month, 1))), 1) -index <- startIndex:endIndex -return(index) -})) -targetYearIndex <- targetYearIndex[DateIndex] -# cannot directly return output here, because sometimes, month can be incontinuous, -# we still need the next process to sift month. -} -} -} -gridData$Dates$start <- gridData$Dates$start[targetYearIndex] -gridData$Dates$end <- gridData$Dates$end[targetYearIndex] -gridData$Data <- chooseDim(gridData$Data, timeDim, targetYearIndex) -} -if (!is.null(month)) { -Dates <- as.POSIXlt(gridData$Dates$start) -monIndex <- Dates$mon + 1 -targetMonIndex <- which(monIndex %in% month) -if (length(targetMonIndex) == 0) stop('Check your input year, it may exceed the years -in the input dataset.') -gridData$Dates$start <- gridData$Dates$start[targetMonIndex] -gridData$Dates$end <- gridData$Dates$end[targetMonIndex] -timeDim <- match('time', attributes(gridData$Data)$dimensions) -gridData$Data <- chooseDim(gridData$Data, timeDim, targetMonIndex) -} -if (!is.null(lon)) { -lonIndex <- gridData$xyCoords$x -lonI1 <- which(abs(lonIndex - min(lon)) == min(abs(lonIndex - min(lon)), na.rm = TRUE)) -lonI2 <- which(abs(lonIndex - max(lon)) == min(abs(lonIndex - max(lon)), na.rm = TRUE)) -# take the as large as possible range -targetLonIndex <- lonI1[length(lonI1)]:lonI2[length(lonI2)] -if (length(targetLonIndex) == 0) stop('Your input lon is too small, try to expand the -longitude range.') -gridData$xyCoords$x <- gridData$xyCoords$x[targetLonIndex] -lonDim <- match('lon', 
attributes(gridData$Data)$dimensions) -gridData$Data <- chooseDim(gridData$Data, lonDim, targetLonIndex) -} -if (!is.null(lat)) { -latIndex <- gridData$xyCoords$y -latI1 <- which(abs(latIndex - min(lat)) == min(abs(latIndex - min(lat)), na.rm = TRUE)) -latI2 <- which(abs(latIndex - max(lat)) == min(abs(latIndex - max(lat)), na.rm = TRUE)) -targetLatIndex <- latI1[length(latI1)]:latI2[length(latI2)] -if (length(targetLonIndex) == 0) stop('Your input lat is too small, try to expand the -latitude range.') -gridData$xyCoords$y <- gridData$xyCoords$y[targetLatIndex] -latDim <- match('lat', attributes(gridData$Data)$dimensions) -gridData$Data <- chooseDim(gridData$Data, latDim, targetLatIndex) -} -return(gridData) -} -output <- downscaleNcdf(gridData, ...) -output <- downscaleNcdf(gridData) -str(gridData) -name <- gridData$Variable$varName -filePath <- file.choose(new = T) -missingValue = 1e20 -units = NULL -name <- gridData$Variable$varName -dimLon <- ncdim_def('lon', 'degree', gridData$xyCoords$x) -dimLat <- ncdim_def('lat', 'degree', gridData$xyCoords$y) -dimMem <- NULL -dimMem <- ncdim_def('member', 'members', 1:length(gridData$Members)) -dimMem -is.null(gridData$Members) -dimMem <- NULL -if (!is.null(gridData$Members)) { -dimMem <- ncdim_def('member', 'members', 1:length(gridData$Members)) -} -# Time needs to be treated seperately -dates <- as.POSIXlt(gridData$Dates$start, tz = tz) -?getTimeUnit -??getTimeUnit -units <- getTimeUnit(dates) -??getTimeUnit -getTimeUnit <- function(dates) { -units <- c('weeks', 'days', 'hours', 'mins', 'secs') -output <- NULL -for (unit in units) { -time <- difftime(dates, dates[1], units = unit) -rem <- sapply(time, function(x) x%%1) -if (!any(rem != 0)) { -output <- unit -break -} -} -return(output) -} -dates <- as.POSIXlt(gridData$Dates$start, tz = tz) -if (is.null(units)) { -units <- getTimeUnit(dates) -time <- difftime(dates, dates[1], units = units) -} else { -time <- difftime(dates, dates[1], units = units) -} -timeUnits <- 
paste(units, 'since', dates[1]) -dimTime <- ncdim_def('time', timeUnits, time) -timeUnits -time -as.numeric(time) -dimTime <- ncdim_def('time', timeUnits, as.numeric(time)) -str(dimTime) -dimList <- list(dimLon, dimLat, dimTime, dimMem) -dimList <- Filter(Negate(is.null), dimList) -var <- ncvar_def( name, "units", dimList, missingValue) -nc <- nc_create(filePath, var) -ncatt_put(nc, "time", "standard_name","time") -ncatt_put(nc, "time", "axis","T") -ncatt_put(nc, "time", "_CoordinateAxisType","Time") -#ncatt_put(nc, "time", "_ChunkSize",1) -ncatt_put(nc, "lon", "standard_name","longitude") -ncatt_put(nc, "lon", "_CoordinateAxisType","Lon") -ncatt_put(nc, "lat", "standard_name","latitude") -ncatt_put(nc, "lat", "_CoordinateAxisType","Lat") -if (!is.null(dimMem)){ -ncatt_put(nc, "member", "standard_name","realization") -ncatt_put(nc, "member", "_CoordinateAxisType","Ensemble") -#att.put.ncdf(nc, "member", "ref","http://www.uncertml.org/samples/realisation") -} -ncatt_put(nc, 0, "Conventions","CF-1.4") -ncatt_put(nc, 0, 'WrittenBy', 'hyfo(http://yuanchao-xu.github.io/hyfo/)') -dimIndex <- match(c('lon', 'lat', 'time', 'member'), attributes(gridData$Data)$dimensions) -dimIndex <- na.omit(dimIndex) -dimIndex -data <- aperm(gridData$Data, dimIndex) -ncvar_put(nc, name, data) -nc_close(nc) -devtools::document() -devtools::check(cran=T) -devtools::document() -devtools::check(cran=T) devtools::build() -a <- c(null,null) -a <- c(NULL, NULL) -a -?match -match() -match -x <- c('lon', 'lat', 'time', 'member') -table <- c('longitude', 'latitude') -index <- lapply(x, function(x) { -a <- grep(x, table) -}) -a -index -index <- sapply(x, function(x) { -a <- grep(x, table) -}) -index -index <- unlist(lapply(x, function(x) { -a <- grep(x, table) -})) -index -table <- c('latitude', 'longitude') -index <- unlist(lapply(x, function(x) { -a <- grep(x, table) -})) -index -table <- c('itude', 'itude') -index <- unlist(lapply(x, function(x) { -a <- grep(x, table) -})) -index 
+devtools::check(remote = T, cran = T) devtools::document() -devtools::check() -devtools::check() -devtools::build() +devtools::check(remote = T, cran = T) +devtools::check_win_devel() +devtools::check_win_devel() +install.packages("devtools") +install.packages("devtools") +install.packages("devtools") +install.packages("devtools") +install.packages("devtools") +install.packages("devtools") +install.packages("devtools") devtools::document() -devtools::check() -devtools::check() -?match -devtools::build() +install.packages("cli") +install.packages("cli") devtools::document() -devtools::check() +devtools::check(remote = T, cran = T) +library(cli) devtools::document() -devtools::check() -devtools::build() +library(devtools) devtools::document() -devtools::check() -devtools::build() +library(rlang) devtools::document() -devtools::check() +devtools::check(remote = T, cran = T) devtools::build() -devtools::document() -devtools::check() +?devtools +devtools::build(binary = T) devtools::build() -library(ncdf4) -?nc_write -??nc_create -devtools::document() -devtools::document() -devtools::check() +devtools::submit_cran() +devtools::use_cran_comments() +use_cran_comments() +usethis::use_cran_comments() +devtools::submit_cran() +devtools::submit_cran() +devtools::submit_cran() devtools::build() -writePath <- file.choose() -writePath <- file.choose(new = T) -library(hyfo) -filePath <- file.choose() -name <- getNcdfVar(filePath) -a <- loadNcdf(filePath, name[6]) -str(a) -a$xyCoords$y -attributes(a$xyCoords$y) -attributes(a$xyCoords$y)$name <- 'dafa' -attributes(a$xyCoords$y) +devtools::submit_cran() +debug(devtools::build()) +debug(submit_cran) +debug(devtools::submit_cran) +devtools::submit_cran() +devtools::submit_cran() +r +new_url +new_url$query$submit == "1" +new_url$query$submit +new_url$query +r$url +httr::parse_url(r$url) +devtools::submit_cran() +undebug(devtools::submit_cran) +devtools::submit_cran() +devtools::check(remote = T, cran = T) +devtools::check(remote = T, 
cran = T) +devtools::check(remote = T, cran = T) +devtools::check(remote = T, cran = T) +devtools::check_win_devel() +devtools::submit_cran() +devtools::check(remote = T, cran = T) +devtools::check(remote = T, cran = T) +install.packages("sp") +install.packages("sp") +install.packages("sp") +devtools::check(remote = T, cran = T) +devtools::check(remote = T, cran = T) +devtools::check_win_devel() +devtools::check_win_devel() +devtools::submit_cran() devtools::build() -devtools::check() -devtools::check() -attributes(a$xyCoords$x)$name -?writeNcdf -filePath1 <- system.file("extdata", "tnc.nc", package = "hyfo") -varname <- getNcdfVar(filePath1) -nc <- loadNcdf(filePath1, varname1) -nc <- loadNcdf(filePath1, varname) -str(nc) -nc$xyCoords$x -nc$xyCoords$y -devtools::check() -devtools::build() -devtools::check() -devtools::build() -devtools::check() -filePath <- system.file("extdata", "tnc.nc", package = "hyfo") -varname <- getNcdfVar(filePath) -nc <- loadNcdf(filePath, varname) -writePath <- file.choose(new=T) -writeNcdf(nc, writePath) -a <- loadNcdf(writePath, varname) -str(a) -filePath <- file.choose() -name <- getNcdfVar(filePath) -a <- loadNcdf(filePath, name[6]) -writeNcdf(nc, writePath) -b <- loadNcdf(writePath, name[6]) -writeNcdf(a, writePath) -b <- loadNcdf(writePath, name[6]) -str(b) -str(a) -debug(writeNcdf) -writeNcdf(a, writePath) -str(data) -debug(loadNcdf) -b <- loadNcdf(writePath, name[6]) -str(var) -devtools::check() -writeNcdf(a, writePath) -b <- loadNcdf(writePath, name[6]) -str(b) -debug(writeNcdf) -writeNcdf(a, writePath) -dimIndex -dimIndex -str(diList) -str(dimList) -str(dimList) -str(dimList) -name -str(var) -str(dimList) -dimIndex -attributes(gridData$Data)$dimensions -str(dimList) -c('lon', 'lat', 'time', 'member')[dimIndex] -dimIndex -c('lon', 'lat', 'time', 'member')[c(1,3,2)] -c('lon', 'lat', 'time', 'member')[c(1,2,3)] -attributes(gridData$Data)$dimensions -grep('longitude', 'lon') -grep('lon','longitude') -?grep -dimIndex 
-order(dimIndex) -dimIndex -str(dimIndex) -devtools::check() -devtools::check() -writeNcdf(a, writePath) -debug(writeNcdf) -writeNcdf(a, writePath) -devtools::check() -debug(writeNcdf) -writeNcdf(a, writePath) -dimIndex -str(dimList) -str(dimList) -str(dimList) -str(var) -name -str(nc) -b <- loadNcdf(writePath, name[6]) -str(b) -identical(a, b) -str(a) -identical(a$Variable, b$Variable) -identical(a$xyCoords, b$xyCoords) -identical(a$Dates, b$Dates) -identical(a$Data, b$Data) -str(a$Data) -str(b$Data) -a$Data == b$Data -a$Data - b$Data -a$Data - b$Data > 0.0001 -any((a$Data - b$Data > 0.0001) == T) -any((a$Data - b$Data > 0.0000001) == T) -devtools::build() -devtools::check() -devtools::build() -library(hyfo) -?biasCorrect -filePath <- system.file("extdata", "tnc.nc", package = "hyfo") -varname <- getNcdfVar(filePath) -nc <- loadNcdf(filePath, varname) -data(tgridData) -newFrc <- biasCorrect(nc, tgridData, tgridData) -newFrc <- biasCorrect(tgridData, tgridData, tgridData) -debug(biasCorrect) -newFrc <- biasCorrect(tgridData, tgridData, tgridData) -undebug(biasCorrect) -debug(biasCorrect.list) -devtools::check() -debug(biasCorrect.list) -newFrc <- biasCorrect(tgridData, tgridData, tgridData) -memberIndex -devtools::check() diff --git a/.Rproj.user/7DCFFB88/cpp-definition-cache b/.Rproj.user/7DCFFB88/cpp-definition-cache new file mode 100644 index 0000000..32960f8 --- /dev/null +++ b/.Rproj.user/7DCFFB88/cpp-definition-cache @@ -0,0 +1,2 @@ +[ +] \ No newline at end of file diff --git a/.Rproj.user/7DCFFB88/pcs/debug-breakpoints.pper b/.Rproj.user/7DCFFB88/pcs/debug-breakpoints.pper new file mode 100644 index 0000000..5528aea --- /dev/null +++ b/.Rproj.user/7DCFFB88/pcs/debug-breakpoints.pper @@ -0,0 +1,6 @@ +{ + "debugBreakpointsState" : { + "breakpoints" : [ + ] + } +} \ No newline at end of file diff --git a/.Rproj.user/7DCFFB88/pcs/files-pane.pper b/.Rproj.user/7DCFFB88/pcs/files-pane.pper new file mode 100644 index 0000000..10314bb --- /dev/null +++ 
b/.Rproj.user/7DCFFB88/pcs/files-pane.pper @@ -0,0 +1,9 @@ +{ + "path" : "~/GitHub/hyfo/R", + "sortOrder" : [ + { + "ascending" : true, + "columnIndex" : 2 + } + ] +} \ No newline at end of file diff --git a/.Rproj.user/7DCFFB88/pcs/find-in-files.pper b/.Rproj.user/7DCFFB88/pcs/find-in-files.pper new file mode 100644 index 0000000..ad22e37 --- /dev/null +++ b/.Rproj.user/7DCFFB88/pcs/find-in-files.pper @@ -0,0 +1,10 @@ +{ + "dialog-state" : { + "caseSensitive" : false, + "filePatterns" : [ + ], + "path" : "~/GitHub/hyfo", + "query" : "gtable", + "regex" : false + } +} \ No newline at end of file diff --git a/.Rproj.user/7DCFFB88/pcs/source-pane.pper b/.Rproj.user/7DCFFB88/pcs/source-pane.pper new file mode 100644 index 0000000..1743e40 --- /dev/null +++ b/.Rproj.user/7DCFFB88/pcs/source-pane.pper @@ -0,0 +1,3 @@ +{ + "activeTab" : 0 +} \ No newline at end of file diff --git a/.Rproj.user/7DCFFB88/pcs/windowlayoutstate.pper b/.Rproj.user/7DCFFB88/pcs/windowlayoutstate.pper new file mode 100644 index 0000000..5d66c60 --- /dev/null +++ b/.Rproj.user/7DCFFB88/pcs/windowlayoutstate.pper @@ -0,0 +1,14 @@ +{ + "left" : { + "panelheight" : 943, + "splitterpos" : 392, + "topwindowstate" : "NORMAL", + "windowheight" : 981 + }, + "right" : { + "panelheight" : 943, + "splitterpos" : 588, + "topwindowstate" : "NORMAL", + "windowheight" : 981 + } +} \ No newline at end of file diff --git a/.Rproj.user/7DCFFB88/pcs/workbench-pane.pper b/.Rproj.user/7DCFFB88/pcs/workbench-pane.pper new file mode 100644 index 0000000..0e24b84 --- /dev/null +++ b/.Rproj.user/7DCFFB88/pcs/workbench-pane.pper @@ -0,0 +1,6 @@ +{ + "TabSet1" : 0, + "TabSet2" : 0, + "TabZoom" : { + } +} \ No newline at end of file diff --git a/.Rproj.user/7DCFFB88/rmd-outputs b/.Rproj.user/7DCFFB88/rmd-outputs new file mode 100644 index 0000000..3f2ff2d --- /dev/null +++ b/.Rproj.user/7DCFFB88/rmd-outputs @@ -0,0 +1,5 @@ + + + + + diff --git a/.Rproj.user/7DCFFB88/saved_source_markers 
b/.Rproj.user/7DCFFB88/saved_source_markers new file mode 100644 index 0000000..2b1bef1 --- /dev/null +++ b/.Rproj.user/7DCFFB88/saved_source_markers @@ -0,0 +1 @@ +{"active_set":"","sets":[]} \ No newline at end of file diff --git a/.Rproj.user/7DCFFB88/sdb/per/t/B4594EC b/.Rproj.user/7DCFFB88/sdb/per/t/B4594EC new file mode 100644 index 0000000..d4292c9 --- /dev/null +++ b/.Rproj.user/7DCFFB88/sdb/per/t/B4594EC @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "\n\n\n#' Biascorrect the input timeseries or hyfo dataset\n#' \n#' Biascorrect the input time series or dataset, the input time series or dataset should consist of observation, hindcast, and forecast.\n#' observation and hindcast should belong to the same period, in order to calibrate. Then the modified forecast\n#' will be returned. If the input is a time series, first column should be date column and rest columns should be \n#' the value column. If the input is a hyfo dataset, the dataset should be the result of \\code{loadNcdf}, or a list\n#' file with the same format. \n#' \n#' \n#' @param frc a hyfo grid data output or a dataframe (time series) consists of Date column and one or more value columns, \n#' representing the forecast to be calibrated.\n#' @param hindcast a hyfo grid data output or a dataframe(time series) consists of Date column and one or more value columns, \n#' representing the hindcast data. This data will be used in the calibration of the forecast, so it's better to have the same date period as\n#' observation data. Check details for more information.\n#' @param obs a hyfo grid data output or a dataframe (time series) consists of Date column and one or more value columns, \n#' representing the observation data.\n#' @param method bias correct method, including 'delta', 'scaling'..., default is 'scaling'\n#' @param scaleType only when the method \"scaling\" is chosen, scaleType will be available. 
Two different types\n#' of scaling method, 'add' and 'multi', which means additive and multiplicative scaling method. More info check \n#' details. Default scaleType is 'multi'.\n#' @param preci If the precipitation is biascorrected, then you have to assign \\code{preci = TRUE}. Since for\n#' precipitation, some biascorrect methods may not apply to, or some methods are specially for precipitation. \n#' Default is FALSE, refer to details.\n#' @param prThreshold The minimum value that is considered as a non-zero precipitation. Default to 0 (assuming mm). If you want \n#' to use precipitation biascorrect, you should consider carefully how to set this threshold, usually is 1. But you \n#' can try with different numbers to see the results.\n#' @param extrapolate When use 'eqm' method, and 'no' is set, modified frc is bounded by the range of obs.\n#' If 'constant' is set, modified frc is not bounded by the range of obs. Default is 'no'.\n#' @details \n#' \n#' Since climate forecast is based on global condition, when downscaling to different regions, it may include\n#' some bias, biascorrection is used then to fix the bias.\n#' \n#' \\strong{Hindcast}\n#' \n#' In order to bias correct, we need to pick up some data from the forecast to train with\n#' the observation, which is called hindcast in this function. Using hindcast and observation, \n#' the program can analyze the bias and correct the bias in the forecast. \n#' \n#' Hindcast should have \\strong{EVERY} attributes that forecast has.\n#' \n#' Hindcast is also called re-forecast, is the forecast of the past. E.g. you have a forecast from year 2000-2010, assuming now you are in 2005. 
So from 2000-2005, this period\n#' is the hindcast period, and 2005-2010, this period is the forecast period.\n#'\n#' Hindcast can be the same as forecast, i.e., you can use forecast itself as hindcast to train the bias correction.\n#'\n#'\n#' \\strong{How it works}\n#' \n#' Forecast product has to be calibrated, usually the system is doing forecast in real time. So, e.g., if the \n#' forecast starts from year 2000, assuming you are in year 2003, then you will have 3 years' hindcast \n#' data (year 2000-2003), which can be used to calibrate. And your forecast period is (2003-2004)\n#' \n#' E.g. you have observation from 2001-2002, this is your input obs. Then you can take the same \n#' period (2001-2002) from the forecast, which is the hindcast period. For forecast, you can take any period.\n#' The program will evaluate the obs and hindcast, to get the modification of the forecast, and then add the \n#' modification to the forecast data.\n#' \n#' The more categorized input, the more accurate result you will get. E.g., if you want to \n#' bias correct a forecast for winter season. So you'd better to extract all the winter period\n#' in the hindcast and observation to train. \\code{extractPeriod} can be used for this purpose.\n#' \n#' \\strong{method}\n#' \n#' Different methods used in the bias correction. Among which, delta, scaling can be applied\n#' to different kinds of parameters, with no need to set \\code{preci}; eqm has two conditions for rainfall data and other data,\n#' it needs user to input \\code{preci = TRUE/FALSE} to point to different conditions; gqm is\n#' designed for rainfall data, so \\code{preci = TRUE} needs to be set.\n#' \n#' \\strong{delta}\n#' \n#' This method consists on adding to the observations the mean change signal (delta method). \n#' This method is applicable to any kind of variable but it is preferable to avoid it for bounded variables\n#' (e.g. precipitation, wind speed, etc.) 
because values out of the variable range could be obtained \n#' (e.g. negative wind speeds...)\n#' \n#' \\strong{scaling}\n#' \n#' This method consists on scaling the simulation with the difference (additive) or quotient (multiplicative) \n#' between the observed and simulated means in the train period. The \\code{additive} or \\code{multiplicative}\n#' correction is defined by parameter \\code{scaling.type} (default is \\code{additive}).\n#' The additive version is preferably applicable to unbounded variables (e.g. temperature) \n#' and the multiplicative to variables with a lower bound (e.g. precipitation, because it also preserves the frequency). \n#' \n#' \\strong{eqm}\n#' \n#' Empirical Quantile Mapping. This is a very extended bias correction method which consists on calibrating the simulated Cumulative Distribution Function (CDF) \n#' by adding to the observed quantiles both the mean delta change and the individual delta changes in the corresponding quantiles. \n#' This method is applicable to any kind of variable.\n#' \n#' It can keep the extreme value, if you choose constant extrapolation method. But then you will face the risk\n#' that the extreme value is an error.\n#' \n#' \\strong{gqm}\n#' \n#' Gamma Quantile Mapping. This method is described in Piani et al. 2010 and is applicable only to precipitation. It is based on the initial assumption that both observed\n#' and simulated intensity distributions are well approximated by the gamma distribution, therefore is a parametric q-q map \n#' that uses the theorical instead of the empirical distribution. \n#' \n#' It can somehow filter some extreme values caused by errors, while keep the extreme value. 
Seems more reasonable.\n#' Better have a long period of training, and the if the forecast system is relatively stable.\n#' \n#' It is a generic function, if in your case you need to debug, please see \\code{?debug()} \n#' for how to debug S4 method.\n#' \n#' @examples \n#' \n#' ######## hyfo grid file biascorrection\n#' ########\n#' \n#' # If your input is obtained by \\code{loadNcdf}, you can also directly biascorrect\n#' # the file.\n#' \n#' # First load ncdf file.\n#' filePath <- system.file(\"extdata\", \"tnc.nc\", package = \"hyfo\")\n#' varname <- getNcdfVar(filePath) \n#' nc <- loadNcdf(filePath, varname)\n#' \n#' data(tgridData)\n#' # Since the example data, has some NA values, the process will include some warning #message, \n#' # which can be ignored in this case.\n#' \n#' \n#' \n#' \n#' # Then we will use nc data as forecasting data, and use itself as hindcast data,\n#' # use tgridData as observation.\n#' newFrc <- biasCorrect(nc, nc, tgridData) \n#' newFrc <- biasCorrect(nc, nc, tgridData, scaleType = 'add') \n#' newFrc <- biasCorrect(nc, nc, tgridData, method = 'eqm', extrapolate = 'constant', \n#' preci = TRUE) \n#' newFrc <- biasCorrect(nc, nc, tgridData, method = 'gqm', preci = TRUE) \n#' \n#' \n#' ######## Time series biascorrection\n#' ########\n#' \n#' # Use the time series from testdl as an example, we take frc, hindcast and obs from testdl.\n#' data(testdl)\n#' \n#' # common period has to be extracted in order to better train the forecast.\n#' \n#' datalist <- extractPeriod(testdl, startDate = '1994-1-1', endDate = '1995-10-1')\n#' \n#' frc <- datalist[[1]]\n#' hindcast <- datalist[[2]]\n#' obs <- datalist[[3]]\n#' \n#' \n#' # The data used here is just for example, so there could be negative data.\n#' \n#' # default method is scaling, with 'multi' scaleType\n#' frc_new <- biasCorrect(frc, hindcast, obs)\n#' \n#' # for precipitation data, extra process needs to be executed, so you have to tell\n#' # the program that it is a precipitation 
data.\n#' \n#' frc_new1 <- biasCorrect(frc, hindcast, obs, preci = TRUE)\n#' \n#' # You can use other scaling methods to biascorrect.\n#' frc_new2 <- biasCorrect(frc, hindcast, obs, scaleType = 'add')\n#' \n#' # \n#' frc_new3 <- biasCorrect(frc, hindcast, obs, method = 'eqm', preci = TRUE)\n#' frc_new4 <- biasCorrect(frc, hindcast, obs, method = 'gqm', preci = TRUE)\n#' \n#' plotTS(obs, frc, frc_new, frc_new1, frc_new2, frc_new3, frc_new4, plot = 'cum')\n#' \n#' # You can also give name to this input list.\n#' TSlist <- list(obs, frc, frc_new, frc_new1, frc_new2, frc_new3, frc_new4)\n#' names(TSlist) <- c('obs', 'frc', 'delta', 'delta_preci', 'scale', 'eqm', 'gqm')\n#' plotTS(list = TSlist, plot = 'cum')\n#' \n#' \n#' # If the forecasts you extracted only has incontinuous data for certain months and years, e.g.,\n#' # for seasonal forecasting, forecasts only provide 3-6 months data, so the case can be \n#' # for example Dec, Jan and Feb of every year from year 1999-2005.\n#' # In such case, you need to extract certain months and years from observed time series.\n#' # extractPeriod() can be then used.\n#' \n#' \n#'\n#'\n#'\n#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n#' \n#' \n#' @references \n#' Bias correction methods come from \\code{biasCorrection} from \\code{dowscaleR}\n#' \n#' \\itemize{\n#' \n#' \\item Santander Meteorology Group (2015). downscaleR: Climate data manipulation and statistical downscaling. R\n#' package version 0.6-0. https://github.com/SantanderMetGroup/downscaleR/wiki\n#' \n#' \\item R.A.I. Wilcke, T. Mendlik and A. Gobiet (2013) Multi-variable error correction of regional climate models. Climatic Change, 120, 871-887\n#' \n#' \\item A. Amengual, V. Homar, R. Romero, S. Alonso, and C. Ramis (2012) A Statistical Adjustment of Regional Climate Model Outputs to Local Scales: Application to Platja de Palma, Spain. J. Clim., 25, 939-957\n#' \n#' \\item C. Piani, J. O. Haerter and E. 
Coppola (2009) Statistical bias correction for daily precipitation in regional climate models over Europe, Theoretical and Applied Climatology, 99, 187-192\n#' \n#' \\item O. Gutjahr and G. Heinemann (2013) Comparing precipitation bias correction methods for high-resolution regional climate simulations using COSMO-CLM, Theoretical and Applied Climatology, 114, 511-529\n#' }\n#' \n#' @author Yuanchao Xu \\email{xuyuanchao37@@gmail.com }\n#' @importFrom methods setMethod\n#' @export\n#' \nsetGeneric('biasCorrect', function(frc, hindcast, obs, method = 'scaling', scaleType = 'multi', \n preci = FALSE, prThreshold = 0, extrapolate = 'no') {\n standardGeneric('biasCorrect')\n})\n\n\n# Since in new version of roxygen2, describeIn was changed, https://stackoverflow.com/questions/24246594/automatically-document-all-methods-of-an-s4-generic-using-roxygen2\n# so use rdname instead\n#' @rdname biasCorrect\n#' \nsetMethod('biasCorrect', signature('data.frame', 'data.frame', 'data.frame'),\n function(frc, hindcast, obs, method, scaleType, preci, prThreshold, extrapolate) {\n result <- biasCorrect.TS(frc, hindcast, obs, method, scaleType, preci, prThreshold, extrapolate)\n return(result)\n })\n\n#' @rdname biasCorrect\nsetMethod('biasCorrect', signature('list', 'list', 'list'), \n function(frc, hindcast, obs, method, scaleType, preci, prThreshold, extrapolate) {\n result <- biasCorrect.list(frc, hindcast, obs, method, scaleType, preci, prThreshold, extrapolate)\n return(result)\n })\n\n\nbiasCorrect.TS <- function(frc, hindcast, obs, method, scaleType, preci, prThreshold, extrapolate) {\n # First check if the first column is Date\n if (!grepl('-|/', obs[1, 1]) | !grepl('-|/', hindcast[1, 1]) | !grepl('-|/', frc[1, 1])) {\n stop('First column is not date or Wrong Date formate, check the format in ?as.Date{base} \n and use as.Date to convert.If your input is a hyfo dataset, put input = \"hyfo\" as an\n argument, check help for more info.')\n }\n # change to date type is easier, 
but in case in future the flood part is added, Date type doesn't have\n # hour, min and sec, so, it's better to convert it into POSIxlt.\n \n # if condition only accepts one condition, for list comparison, there are a lot of conditions, better\n # further process it, like using any.\n if (any(as.POSIXlt(hindcast[, 1]) != as.POSIXlt(obs[, 1]))) {\n warning('time of obs and time of hindcast are not the same, which may cause inaccuracy in \n the calibration.')\n }\n n <- ncol(frc)\n \n # For every column, it's biascorrected respectively.\n frc_data <- lapply(2:n, function(x) biasCorrect_core(frc[, x], hindcast[, x], obs[, 2], method = method,\n scaleType = scaleType, preci = preci, prThreshold = prThreshold, \n extrapolate = extrapolate))\n frc_data <- do.call('cbind', frc_data)\n rownames(frc_data) <- NULL\n \n names <- colnames(frc)\n frc_new <- data.frame(frc[, 1], frc_data)\n colnames(frc_new) <- names\n return(frc_new)\n}\n\nbiasCorrect.list <- function(frc, hindcast, obs, method, scaleType, preci, prThreshold, extrapolate) {\n ## Check if the data is a hyfo grid data.\n checkHyfo(frc, hindcast, obs)\n \n hindcastData <- hindcast$Data\n obsData <- obs$Data\n frcData <- frc$Data\n \n ## save frc dimension order, at last, set the dimension back to original dimension\n frcDim <- attributes(frcData)$dimensions\n \n ## ajust the dimension into general dimension order.\n hindcastData <- adjustDim(hindcastData, ref = c('lon', 'lat', 'time'))\n obsData <- adjustDim(obsData, ref = c('lon', 'lat', 'time'))\n \n ## CheckDimLength, check if all the input dataset has different dimension length\n # i.e. 
if they all have the same lon and lat number.\n checkDimLength(frcData, hindcastData, obsData, dim = c('lon', 'lat'))\n \n \n # Now real bias correction is executed.\n \n # for some forcasts, they have results from different models or scenarios, if so\n # there will be a dimension called member\n \n memberIndex <- grepAndMatch('member', attributes(frcData)$dimensions)\n \n # For dataset that has a member part \n if (length(memberIndex) != 0) {\n # check if frcData and hindcastData has the same dimension and length.\n checkDimLength(frcData, hindcastData, dim = 'member')\n \n frcData <- adjustDim(frcData, ref = c('lon', 'lat', 'time', 'member'))\n \n # The following code may speed up because it doesn't use for loop.\n # It firstly combine different array into one array. combine the time \n # dimension of frc, hindcast and obs. Then use apply, each time extract \n # the total time dimension, and first part is frc, second is hindcast, third\n # is obs. Then use these three parts to bias correct. All above can be written\n # in one function and called within apply. 
But too complicated to understand,\n # So save it for future use maybe.\n \n # for (member in 1:dim(frcData)[4]) {\n # totalArr <- array(c(frcData[,,, member], hindcastData[,,, member], obsData),\n # dim = c(dim(frcData)[1], dim(frcData)[2], \n # dim(frcData)[3] + dim(hindcastData)[3] + dim(obsData)[3]))\n # }\n \n \n for (member in 1:dim(frcData)[4]) {\n for (lon in 1:dim(frcData)[1]) {\n for (lat in 1:dim(frcData)[2]) {\n frcData[lon, lat,, member] <- biasCorrect_core(frcData[lon, lat,,member], hindcastData[lon, lat,, member], obsData[lon, lat,], method = method,\n scaleType = scaleType, preci = preci, prThreshold = prThreshold, \n extrapolate = extrapolate)\n }\n }\n }\n } else {\n frcData <- adjustDim(frcData, ref = c('lon', 'lat', 'time'))\n for (lon in 1:dim(frcData)[1]) {\n for (lat in 1:dim(frcData)[2]) {\n frcData[lon, lat,] <- biasCorrect_core(frcData[lon, lat,], hindcastData[lon, lat,], obsData[lon, lat,], method = method,\n scaleType = scaleType, preci = preci, prThreshold = prThreshold, \n extrapolate = extrapolate)\n }\n }\n }\n \n frcData <- adjustDim(frcData, ref = frcDim)\n frc$Data <- frcData\n frc$biasCorrected_by <- method\n frc_new <- frc\n return(frc_new)\n}\n\n\n\n\n\n\n#' @importFrom MASS fitdistr\n#' @importFrom stats ecdf quantile pgamma qgamma rgamma\n#' \n#' @references \n#' Bias correction methods come from \\code{biasCorrection} from \\code{dowscaleR}\n#' \n#' \\itemize{\n#' \n#' \\item Santander Meteorology Group (2015). downscaleR: Climate data manipulation and statistical downscaling. R\n#' package version 0.6-0. https://github.com/SantanderMetGroup/downscaleR/wiki\n#' \n#' \\item R.A.I. Wilcke, T. Mendlik and A. Gobiet (2013) Multi-variable error correction of regional climate models. Climatic Change, 120, 871-887\n#' \n#' \\item A. Amengual, V. Homar, R. Romero, S. Alonso, and C. Ramis (2012) A Statistical Adjustment of Regional Climate Model Outputs to Local Scales: Application to Platja de Palma, Spain. J. 
Clim., 25, 939-957\n#' \n#' \\item C. Piani, J. O. Haerter and E. Coppola (2009) Statistical bias correction for daily precipitation in regional climate models over Europe, Theoretical and Applied Climatology, 99, 187-192\n#' \n#' \\item O. Gutjahr and G. Heinemann (2013) Comparing precipitation bias correction methods for high-resolution regional climate simulations using COSMO-CLM, Theoretical and Applied Climatology, 114, 511-529\n#' }\n#' \n#' \n#' \n# this is only used to calculate the value column, \nbiasCorrect_core <- function(frc, hindcast, obs, method, scaleType, preci, prThreshold, extrapolate){\n # If the variable is precipitation, some further process needs to be added.\n # The process is taken from downscaleR, to provide a more reasonable hindcast, used in the calibration.\n \n \n # check if frc, hindcast or obs are all na values\n if (!any(!is.na(obs)) | !any(!is.na(frc)) | !any(!is.na(hindcast))) {\n warning('In this cell, frc, hindcast or obs data is missing. No biasCorrection for this cell.')\n return(NA)\n }\n \n \n if (preci == TRUE) {\n preprocessHindcast_res <- preprocessHindcast(hindcast = hindcast, obs = obs, prThreshold = prThreshold)\n hindcast <- preprocessHindcast_res[[1]]\n minHindcastPreci <- preprocessHindcast_res[[2]]\n }\n \n # default is the simplest method in biascorrection, just do simple addition and subtraction.\n if (method == 'delta') {\n if (length(frc) != length(obs)) stop('This method needs frc data have the same length as obs data.')\n # comes from downscaleR biascorrection method\n frcMean <- mean(frc, na.rm = TRUE)\n hindcastMean <- mean(hindcast, na.rm = TRUE)\n frc <- obs - hindcastMean + frcMean\n \n } else if (method == 'scaling') {\n obsMean <- mean(obs, na.rm = TRUE)\n hindcastMean <- mean(hindcast, na.rm = TRUE)\n \n if (scaleType == 'multi') {\n frc <- frc / hindcastMean * obsMean\n \n } else if (scaleType == 'add') {\n frc <- frc - hindcastMean + obsMean\n }\n \n \n } else if (method == 'eqm') {\n if (preci == 
FALSE) {\n frc <- biasCorrect_core_eqm_nonPreci(frc, hindcast, obs, extrapolate, prThreshold)\n } else {\n frc <- biasCorrect_core_eqm_preci(frc, hindcast, obs, minHindcastPreci, extrapolate,\n prThreshold)\n }\n \n } else if (method == 'gqm') {\n if (preci == FALSE) stop ('gqm method only applys to precipitation, please set preci = T')\n frc <- biasCorrect_core_gqm(frc, hindcast, obs, prThreshold, minHindcastPreci)\n }\n \n \n return(frc)\n}\n\n\n#' @importFrom MASS fitdistr\n#' @importFrom stats rgamma\npreprocessHindcast <- function(hindcast, obs, prThreshold) {\n lowerIndex <- length(which(obs < prThreshold))\n \n # In the original function, this minHindcastPreci is Pth[,i,j] in downscaleR, and it is originally\n # set to NA, which is not so appropriate for all the precipitations.\n # In the original function, there are only two conditions, 1. all the obs less than threshold\n # 2. there are some obs less than threshold. \n # While, if we set threshold to 0, there could be a 3rd condition, all the obs no less than threshold.\n # Here I set this situation, firstly set minHindcastPreci to the min of the hindcast. 
Because in future\n # use, 'eqm' method is going to use this value.\n \n # The problem above has been solved.\n \n \n if (lowerIndex >= 0 & lowerIndex < length(obs)) {\n index <- sort(hindcast, decreasing = FALSE, na.last = NA, index.return = TRUE)$ix\n hindcast_sorted <- sort(hindcast, decreasing = FALSE, na.last = NA)\n # minHindcastPreci is the min preci over threshold FOR ***HINDCAST***\n # But use obs to get the lowerIndex, so obs_sorted[lowerIndex + 1] > prThreshold, but\n # hindcast_sorted[lowerIndex + 1] may greater than or smaller than ptThreshold\n \n \n # It would be better to understand if you draw two lines: hindcast_sorted and obs_sorted\n # with y = prThreshold, you will find the difference of the two.\n \n # In principle, the value under the threshold needs to be replaced by some other reasonable value.\n # simplest way \n minHindcastPreci <- hindcast_sorted[lowerIndex + 1]\n \n # Also here if minHindcastPreci is 0 and prThreshold is 0, will cause problem, bettter set \n # I set it prThreshold != 0 \n if (minHindcastPreci <= prThreshold & prThreshold != 0) {\n obs_sorted <- sort(obs, decreasing = FALSE, na.last = NA)\n \n # higherIndex is based on hindcast\n higherIndex <- which(hindcast_sorted > prThreshold & !is.na(hindcast_sorted))\n \n if (length(higherIndex) == 0) {\n higherIndex <- max(which(!is.na(hindcast_sorted)))\n higherIndex <- min(length(obs_sorted), higherIndex)\n } else {\n higherIndex <- min(higherIndex)\n }\n # here I don't know why choose 6.\n # Written # [Shape parameter Scale parameter] in original package\n # according to the reference and gamma distribution, at least 6 values needed to fit gamma\n # distribution.\n if (length(unique(obs_sorted[(lowerIndex + 1):higherIndex])) < 6) {\n hindcast_sorted[(lowerIndex + 1):higherIndex] <- mean(obs_sorted[(lowerIndex + 1):higherIndex], \n na.rm = TRUE)\n } else {\n obsGamma <- fitdistr(obs_sorted[(lowerIndex + 1):higherIndex], \"gamma\")\n \n # this is to replace the original hindcast 
value between lowerIndex and higherIndex with \n # some value taken from gamma distribution just generated.\n hindcast_sorted[(lowerIndex + 1):higherIndex] <- rgamma(higherIndex - lowerIndex, obsGamma$estimate[1], \n rate = obsGamma$estimate[2])\n }\n hindcast_sorted <- sort(hindcast_sorted, decreasing = FALSE, na.last = NA)\n \n } \n minIndex <- min(lowerIndex, length(hindcast))\n hindcast_sorted[1:minIndex] <- 0\n hindcast[index] <- hindcast_sorted\n \n } else if (lowerIndex == length(obs)) {\n \n index <- sort(hindcast, decreasing = FALSE, na.last = NA, index.return = TRUE)$ix\n hindcast_sorted <- sort(hindcast, decreasing = FALSE, na.last = NA)\n minHindcastPreci <- hindcast_sorted[lowerIndex]\n \n # here is to compare with hindcast, not obs\n minIndex <- min(lowerIndex, length(hindcast))\n hindcast_sorted[1:minIndex] <- 0\n hindcast[index] <- hindcast_sorted\n \n }\n return(list(hindcast, minHindcastPreci))\n}\n\nbiasCorrect_core_eqm_nonPreci <- function(frc, hindcast, obs, extrapolate, prThreshold) {\n ecdfHindcast <- ecdf(hindcast)\n \n if (extrapolate == 'constant') {\n higherIndex <- which(frc > max(hindcast, na.rm = TRUE))\n lowerIndex <- which(frc < min(hindcast, na.rm = TRUE))\n \n extrapolateIndex <- c(higherIndex, lowerIndex)\n non_extrapolateIndex <- setdiff(1:length(frc), extrapolateIndex)\n \n # In case extrapolateIndex is of length zero, than extrapolate cannot be used afterwards\n # So use setdiff(1:length(sim), extrapolateIndex), if extrapolateIndex == 0, than it will\n # return 1:length(sim)\n \n if (length(higherIndex) > 0) {\n maxHindcast <- max(hindcast, na.rm = TRUE)\n dif <- maxHindcast - max(obs, na.rm = TRUE)\n frc[higherIndex] <- frc[higherIndex] - dif\n }\n \n if (length(lowerIndex) > 0) {\n minHindcast <- min(hindcast, na.rm = TRUE)\n dif <- minHindcast - min(obs, nna.rm = TRUE)\n frc[lowerIndex] <- frc[lowerIndex] - dif\n }\n \n frc[non_extrapolateIndex] <- quantile(obs, probs = ecdfHindcast(frc[non_extrapolateIndex]), \n na.rm = 
TRUE, type = 4)\n } else {\n frc <- quantile(obs, probs = ecdfHindcast(frc), na.rm = TRUE, type = 4)\n }\n return(frc)\n}\n\nbiasCorrect_core_eqm_preci <- function(frc, hindcast, obs, minHindcastPreci, extrapolate, \n prThreshold) {\n \n # Most of time this condition seems useless because minHindcastPreci comes from hindcast, so there will be\n # always hindcast > minHindcastPreci exists.\n # Unless one condition that minHindcastPreci is the max in the hindcast, than on hindcast > minHindcastPreci\n if (length(which(hindcast > minHindcastPreci)) > 0) {\n \n ecdfHindcast <- ecdf(hindcast[hindcast > minHindcastPreci])\n \n noRain <- which(frc <= minHindcastPreci & !is.na(frc))\n rain <- which(frc > minHindcastPreci & !is.na(frc))\n \n # drizzle is to see whether there are some precipitation between the min frc (over threshold) and \n # min hindcast (over threshold).\n drizzle <- which(frc > minHindcastPreci & frc <= min(hindcast[hindcast > minHindcastPreci], na.rm = TRUE) \n & !is.na(frc))\n \n if (length(rain) > 0) {\n ecdfFrc <- ecdf(frc[rain])\n \n if (extrapolate == 'constant') {\n \n # This higher and lower index mean the extrapolation part\n higherIndex <- which(frc[rain] > max(hindcast, na.rm = TRUE))\n lowerIndex <- which(frc[rain] < min(hindcast, na.rm = TRUE))\n \n extrapolateIndex <- c(higherIndex, lowerIndex)\n non_extrapolateIndex <- setdiff(1:length(rain), extrapolateIndex)\n \n if (length(higherIndex) > 0) {\n maxHindcast <- max(hindcast, na.rm = TRUE)\n dif <- maxHindcast - max(obs, na.rm = TRUE)\n frc[rain[higherIndex]] <- frc[higherIndex] - dif\n }\n \n if (length(lowerIndex) > 0) {\n minHindcast <- min(hindcast, na.rm = TRUE)\n dif <- minHindcast - min(obs, nna.rm = TRUE)\n frc[rain[lowerIndex]] <- frc[lowerIndex] - dif\n }\n \n # Here the original function doesn't accout for the situation that extraploateIndex is 0\n # if it is 0, rain[-extraploateIndex] would be nothing\n \n # Above has been solved by using setdiff.\n 
frc[rain[non_extrapolateIndex]] <- quantile(obs[which(obs > prThreshold & !is.na(obs))], \n probs = ecdfHindcast(frc[rain[non_extrapolateIndex]]), \n na.rm = TRUE, type = 4)\n } else {\n \n frc[rain] <- quantile(obs[which(obs > prThreshold & !is.na(obs))], \n probs = ecdfHindcast(frc[rain]), na.rm = TRUE, type = 4)\n }\n }\n if (length(drizzle) > 0){\n \n # drizzle part is a seperate part. it use the ecdf of frc (larger than minHindcastPreci) to \n # biascorrect the original drizzle part\n frc[drizzle] <- quantile(frc[which(frc > min(hindcast[which(hindcast > minHindcastPreci)], na.rm = TRUE) & \n !is.na(frc))], probs = ecdfFrc(frc[drizzle]), na.rm = TRUE, \n type = 4)\n }\n \n frc[noRain] <- 0\n \n } else {\n # in this condition minHindcastPreci is the max of hindcast, so all hindcast <= minHindcastPreci\n # And frc distribution is used then.\n noRain <- which(frc <= minHindcastPreci & !is.na(frc))\n rain <- which(frc > minHindcastPreci & !is.na(frc))\n \n if (length(rain) > 0) {\n ecdfFrc <- ecdf(frc[rain])\n frc[rain] <- quantile(obs[which(obs > prThreshold & !is.na(obs))], probs = ecdfFrc(frc[rain]), \n na.rm = TRUE, type = 4)\n }\n frc[noRain]<-0\n }\n return(frc)\n}\n\nbiasCorrect_core_gqm <- function(frc, hindcast, obs, prThreshold, minHindcastPreci) {\n if (any(obs > prThreshold)) {\n \n ind <- which(obs > prThreshold & !is.na(obs))\n obsGamma <- fitdistr(obs[ind],\"gamma\")\n ind <- which(hindcast > 0 & !is.na(hindcast))\n hindcastGamma <- fitdistr(hindcast[ind],\"gamma\")\n rain <- which(frc > minHindcastPreci & !is.na(frc))\n noRain <- which(frc <= minHindcastPreci & !is.na(frc))\n \n probF <- pgamma(frc[rain], hindcastGamma$estimate[1], rate = hindcastGamma$estimate[2])\n frc[rain] <- qgamma(probF,obsGamma$estimate[1], rate = obsGamma$estimate[2])\n frc[noRain] <- 0\n } else {\n warning('All the observations of this cell(station) are lower than the threshold, \n no bias correction applied.')\n }\n return(frc)\n }\n", + "created" : 1490325226249.000, + 
"dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "3235762164", + "id" : "B4594EC", + "lastKnownWriteTime" : 1538045340, + "last_content_update" : 1538045340407, + "path" : "~/GitHub/hyfo/R/biasCorrect(generic).R", + "project_path" : "R/biasCorrect(generic).R", + "properties" : { + }, + "relative_order" : 1, + "source_on_save" : false, + "source_window" : "", + "type" : "r_source" +} \ No newline at end of file diff --git a/.Rproj.user/7DCFFB88/sdb/prop/284FBADF b/.Rproj.user/7DCFFB88/sdb/prop/284FBADF new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/7DCFFB88/sdb/prop/284FBADF @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/7DCFFB88/sdb/prop/INDEX b/.Rproj.user/7DCFFB88/sdb/prop/INDEX new file mode 100644 index 0000000..0f4d65f --- /dev/null +++ b/.Rproj.user/7DCFFB88/sdb/prop/INDEX @@ -0,0 +1,7 @@ +~%2FGitHub%2Fhyfo%2FDESCRIPTION="9E7FEC5D" +~%2FGitHub%2Fhyfo%2FNEWS="F6FD6B5D" +~%2FGitHub%2Fhyfo%2FR%2FbiasCorrect(generic).R="284FBADF" +~%2FGitHub%2Fhyfo%2FR%2Fmulti-biasCorrect(generic).R="8D6FC6B0" +~%2FGitHub%2Fhyfo%2FR%2Fncdf.R="3AC0369" +~%2FGitHub%2Fhyfo%2FREADME.md="F3D2C6B8" +~%2FGitHub%2Fhyfo%2Fvignettes%2Fhyfo.Rmd="7142D528" diff --git a/.Rproj.user/7DCFFB88/session-persistent-state b/.Rproj.user/7DCFFB88/session-persistent-state new file mode 100644 index 0000000..7b8d0ff --- /dev/null +++ b/.Rproj.user/7DCFFB88/session-persistent-state @@ -0,0 +1 @@ +virtual-session-id="22D464B7" diff --git a/.Rproj.user/D1D10CF6/cpp-definition-cache b/.Rproj.user/D1D10CF6/cpp-definition-cache new file mode 100644 index 0000000..32960f8 --- /dev/null +++ b/.Rproj.user/D1D10CF6/cpp-definition-cache @@ -0,0 +1,2 @@ +[ +] \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/pcs/debug-breakpoints.pper b/.Rproj.user/D1D10CF6/pcs/debug-breakpoints.pper new file mode 100644 index 0000000..5528aea --- /dev/null +++ b/.Rproj.user/D1D10CF6/pcs/debug-breakpoints.pper @@ -0,0 +1,6 @@ +{ + 
"debugBreakpointsState" : { + "breakpoints" : [ + ] + } +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/pcs/files-pane.pper b/.Rproj.user/D1D10CF6/pcs/files-pane.pper new file mode 100644 index 0000000..10314bb --- /dev/null +++ b/.Rproj.user/D1D10CF6/pcs/files-pane.pper @@ -0,0 +1,9 @@ +{ + "path" : "~/GitHub/hyfo/R", + "sortOrder" : [ + { + "ascending" : true, + "columnIndex" : 2 + } + ] +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/pcs/find-in-files.pper b/.Rproj.user/D1D10CF6/pcs/find-in-files.pper new file mode 100644 index 0000000..5771dfb --- /dev/null +++ b/.Rproj.user/D1D10CF6/pcs/find-in-files.pper @@ -0,0 +1,10 @@ +{ + "dialog-state" : { + "caseSensitive" : false, + "filePatterns" : [ + ], + "path" : "~/GitHub/hyfo", + "query" : "lmcoef", + "regex" : false + } +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/pcs/source-pane.pper b/.Rproj.user/D1D10CF6/pcs/source-pane.pper new file mode 100644 index 0000000..d3d70fa --- /dev/null +++ b/.Rproj.user/D1D10CF6/pcs/source-pane.pper @@ -0,0 +1,3 @@ +{ + "activeTab" : 6 +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/pcs/windowlayoutstate.pper b/.Rproj.user/D1D10CF6/pcs/windowlayoutstate.pper new file mode 100644 index 0000000..47ab870 --- /dev/null +++ b/.Rproj.user/D1D10CF6/pcs/windowlayoutstate.pper @@ -0,0 +1,14 @@ +{ + "left" : { + "panelheight" : 1069, + "splitterpos" : 443, + "topwindowstate" : "NORMAL", + "windowheight" : 1108 + }, + "right" : { + "panelheight" : 1069, + "splitterpos" : 664, + "topwindowstate" : "NORMAL", + "windowheight" : 1108 + } +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/pcs/workbench-pane.pper b/.Rproj.user/D1D10CF6/pcs/workbench-pane.pper new file mode 100644 index 0000000..92c5223 --- /dev/null +++ b/.Rproj.user/D1D10CF6/pcs/workbench-pane.pper @@ -0,0 +1,6 @@ +{ + "TabSet1" : 0, + "TabSet2" : 3, + "TabZoom" : { + } +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/rmd-outputs 
b/.Rproj.user/D1D10CF6/rmd-outputs new file mode 100644 index 0000000..3f2ff2d --- /dev/null +++ b/.Rproj.user/D1D10CF6/rmd-outputs @@ -0,0 +1,5 @@ + + + + + diff --git a/.Rproj.user/D1D10CF6/saved_source_markers b/.Rproj.user/D1D10CF6/saved_source_markers new file mode 100644 index 0000000..2b1bef1 --- /dev/null +++ b/.Rproj.user/D1D10CF6/saved_source_markers @@ -0,0 +1 @@ +{"active_set":"","sets":[]} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/11252CE5 b/.Rproj.user/D1D10CF6/sdb/prop/11252CE5 new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/11252CE5 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/16BD8E13 b/.Rproj.user/D1D10CF6/sdb/prop/16BD8E13 new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/16BD8E13 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/1BB4BBB4 b/.Rproj.user/D1D10CF6/sdb/prop/1BB4BBB4 new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/1BB4BBB4 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/1C27F867 b/.Rproj.user/D1D10CF6/sdb/prop/1C27F867 new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/1C27F867 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/224CF03 b/.Rproj.user/D1D10CF6/sdb/prop/224CF03 new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/224CF03 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/23571832 b/.Rproj.user/D1D10CF6/sdb/prop/23571832 new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/23571832 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/2461C35 b/.Rproj.user/D1D10CF6/sdb/prop/2461C35 
new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/2461C35 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/2988B998 b/.Rproj.user/D1D10CF6/sdb/prop/2988B998 new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/2988B998 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/2E17C2F1 b/.Rproj.user/D1D10CF6/sdb/prop/2E17C2F1 new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/2E17C2F1 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/2E5A7688 b/.Rproj.user/D1D10CF6/sdb/prop/2E5A7688 new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/2E5A7688 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/31175AC6 b/.Rproj.user/D1D10CF6/sdb/prop/31175AC6 new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/31175AC6 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/3A3983B1 b/.Rproj.user/D1D10CF6/sdb/prop/3A3983B1 new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/3A3983B1 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/3ED4EBC5 b/.Rproj.user/D1D10CF6/sdb/prop/3ED4EBC5 new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/3ED4EBC5 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/4F48C490 b/.Rproj.user/D1D10CF6/sdb/prop/4F48C490 new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/4F48C490 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/522B2964 b/.Rproj.user/D1D10CF6/sdb/prop/522B2964 new file mode 100644 index 
0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/522B2964 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/581924DB b/.Rproj.user/D1D10CF6/sdb/prop/581924DB new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/581924DB @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/5B6E4CB4 b/.Rproj.user/D1D10CF6/sdb/prop/5B6E4CB4 new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/5B6E4CB4 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/5E3135C5 b/.Rproj.user/D1D10CF6/sdb/prop/5E3135C5 new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/5E3135C5 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/5F19AB1A b/.Rproj.user/D1D10CF6/sdb/prop/5F19AB1A new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/5F19AB1A @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/614F6C89 b/.Rproj.user/D1D10CF6/sdb/prop/614F6C89 new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/614F6C89 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/619E744A b/.Rproj.user/D1D10CF6/sdb/prop/619E744A new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/619E744A @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/62BD4C03 b/.Rproj.user/D1D10CF6/sdb/prop/62BD4C03 new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/62BD4C03 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/715D0DA2 b/.Rproj.user/D1D10CF6/sdb/prop/715D0DA2 new file mode 100644 index 0000000..7a73a41 --- /dev/null 
+++ b/.Rproj.user/D1D10CF6/sdb/prop/715D0DA2 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/7C28B417 b/.Rproj.user/D1D10CF6/sdb/prop/7C28B417 new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/7C28B417 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/7E5B8828 b/.Rproj.user/D1D10CF6/sdb/prop/7E5B8828 new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/7E5B8828 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/7EEE6E30 b/.Rproj.user/D1D10CF6/sdb/prop/7EEE6E30 new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/7EEE6E30 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/81E308C8 b/.Rproj.user/D1D10CF6/sdb/prop/81E308C8 new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/81E308C8 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/85BAB51C b/.Rproj.user/D1D10CF6/sdb/prop/85BAB51C new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/85BAB51C @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/8DC54783 b/.Rproj.user/D1D10CF6/sdb/prop/8DC54783 new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/8DC54783 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/8F604BF1 b/.Rproj.user/D1D10CF6/sdb/prop/8F604BF1 new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/8F604BF1 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/93C6AB2B b/.Rproj.user/D1D10CF6/sdb/prop/93C6AB2B new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ 
b/.Rproj.user/D1D10CF6/sdb/prop/93C6AB2B @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/9E69FDB4 b/.Rproj.user/D1D10CF6/sdb/prop/9E69FDB4 new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/9E69FDB4 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/9F226FAC b/.Rproj.user/D1D10CF6/sdb/prop/9F226FAC new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/9F226FAC @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/A5EB009E b/.Rproj.user/D1D10CF6/sdb/prop/A5EB009E new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/A5EB009E @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/A698C383 b/.Rproj.user/D1D10CF6/sdb/prop/A698C383 new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/A698C383 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/A9ABBFEB b/.Rproj.user/D1D10CF6/sdb/prop/A9ABBFEB new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/A9ABBFEB @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/AC481488 b/.Rproj.user/D1D10CF6/sdb/prop/AC481488 new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/AC481488 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/AD39FF43 b/.Rproj.user/D1D10CF6/sdb/prop/AD39FF43 new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/AD39FF43 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/ADA38099 b/.Rproj.user/D1D10CF6/sdb/prop/ADA38099 new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ 
b/.Rproj.user/D1D10CF6/sdb/prop/ADA38099 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/B8960C40 b/.Rproj.user/D1D10CF6/sdb/prop/B8960C40 new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/B8960C40 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/BF639043 b/.Rproj.user/D1D10CF6/sdb/prop/BF639043 new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/BF639043 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/D1BE3A89 b/.Rproj.user/D1D10CF6/sdb/prop/D1BE3A89 new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/D1BE3A89 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/D338C194 b/.Rproj.user/D1D10CF6/sdb/prop/D338C194 new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/D338C194 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/D528021A b/.Rproj.user/D1D10CF6/sdb/prop/D528021A new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/D528021A @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/D5D2A63B b/.Rproj.user/D1D10CF6/sdb/prop/D5D2A63B new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/D5D2A63B @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/D64F2EA0 b/.Rproj.user/D1D10CF6/sdb/prop/D64F2EA0 new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/D64F2EA0 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/D9F093AE b/.Rproj.user/D1D10CF6/sdb/prop/D9F093AE new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ 
b/.Rproj.user/D1D10CF6/sdb/prop/D9F093AE @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/DB22ED13 b/.Rproj.user/D1D10CF6/sdb/prop/DB22ED13 new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/DB22ED13 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/DD613721 b/.Rproj.user/D1D10CF6/sdb/prop/DD613721 new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/DD613721 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/E0A1BF84 b/.Rproj.user/D1D10CF6/sdb/prop/E0A1BF84 new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/E0A1BF84 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/E2A56787 b/.Rproj.user/D1D10CF6/sdb/prop/E2A56787 new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/E2A56787 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/E538DE4 b/.Rproj.user/D1D10CF6/sdb/prop/E538DE4 new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/E538DE4 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/EC53DD5E b/.Rproj.user/D1D10CF6/sdb/prop/EC53DD5E new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/EC53DD5E @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/F74CC49C b/.Rproj.user/D1D10CF6/sdb/prop/F74CC49C new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/F74CC49C @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/F9F4FDA9 b/.Rproj.user/D1D10CF6/sdb/prop/F9F4FDA9 new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ 
b/.Rproj.user/D1D10CF6/sdb/prop/F9F4FDA9 @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/FB3EBAAF b/.Rproj.user/D1D10CF6/sdb/prop/FB3EBAAF new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/FB3EBAAF @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/prop/INDEX b/.Rproj.user/D1D10CF6/sdb/prop/INDEX new file mode 100644 index 0000000..d6e0c82 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/prop/INDEX @@ -0,0 +1,56 @@ +~%2FGitHub%2Fhyfo%2F.travis.yml="3ED4EBC5" +~%2FGitHub%2Fhyfo%2FDESCRIPTION="1BB4BBB4" +~%2FGitHub%2Fhyfo%2FNAMESPACE="3A3983B1" +~%2FGitHub%2Fhyfo%2FNEWS="5B6E4CB4" +~%2FGitHub%2Fhyfo%2FR%2FanalyzeTS.R="ADA38099" +~%2FGitHub%2Fhyfo%2FR%2Farray_dimension.R="224CF03" +~%2FGitHub%2Fhyfo%2FR%2FbiasCorrect(generic).R="7EEE6E30" +~%2FGitHub%2Fhyfo%2FR%2Fcase_anarbe.R="7E5B8828" +~%2FGitHub%2Fhyfo%2FR%2Fclasses.R="2E17C2F1" +~%2FGitHub%2Fhyfo%2FR%2FcollectData.R="31175AC6" +~%2FGitHub%2Fhyfo%2FR%2FextractPeriod(generic).R="2988B998" +~%2FGitHub%2Fhyfo%2FR%2FfillGap.R="1C27F867" +~%2FGitHub%2Fhyfo%2FR%2FgetAnnual(generic).R="D338C194" +~%2FGitHub%2Fhyfo%2FR%2FgetEnsemble.R="715D0DA2" +~%2FGitHub%2Fhyfo%2FR%2FgetPreciBar(generic).R="BF639043" +~%2FGitHub%2Fhyfo%2FR%2FgetSpatialMap.R="16BD8E13" +~%2FGitHub%2Fhyfo%2FR%2Flist2dataframe.R="D5D2A63B" +~%2FGitHub%2Fhyfo%2FR%2Fmulti-biasCorrect(generic).R="23571832" +~%2FGitHub%2Fhyfo%2FR%2Fncdf.R="DD613721" +~%2FGitHub%2Fhyfo%2FR%2Freadfolders.R="8DC54783" +~%2FGitHub%2Fhyfo%2FR%2Fresample(generic).R="85BAB51C" +~%2FGitHub%2Fhyfo%2FR%2Fshp2cat.R="2E5A7688" +~%2FGitHub%2Fhyfo%2FR%2Fstartup.R="FB3EBAAF" +~%2FGitHub%2Fhyfo%2FREADME.md="2461C35" +~%2FGitHub%2Fhyfo%2Fman%2FapplyBiasFactor.Rd="D9F093AE" +~%2FGitHub%2Fhyfo%2Fman%2FbiasCorrect.Rd="A5EB009E" +~%2FGitHub%2Fhyfo%2Fman%2FcollectData.Rd="9E69FDB4" +~%2FGitHub%2Fhyfo%2Fman%2FcollectData_csv_anarbe.Rd="F9F4FDA9" 
+~%2FGitHub%2Fhyfo%2Fman%2FcollectData_excel_anarbe.Rd="A9ABBFEB" +~%2FGitHub%2Fhyfo%2Fman%2FcollectData_txt_anarbe.Rd="AC481488" +~%2FGitHub%2Fhyfo%2Fman%2FdownscaleNcdf.Rd="DB22ED13" +~%2FGitHub%2Fhyfo%2Fman%2FextractPeriod.Rd="9F226FAC" +~%2FGitHub%2Fhyfo%2Fman%2FfillGap.Rd="522B2964" +~%2FGitHub%2Fhyfo%2Fman%2FgetAnnual.Rd="F74CC49C" +~%2FGitHub%2Fhyfo%2Fman%2FgetBiasFactor.Rd="5E3135C5" +~%2FGitHub%2Fhyfo%2Fman%2FgetEnsem_comb.Rd="D64F2EA0" +~%2FGitHub%2Fhyfo%2Fman%2FgetFrcEnsem.Rd="A698C383" +~%2FGitHub%2Fhyfo%2Fman%2FgetHisEnsem.Rd="D528021A" +~%2FGitHub%2Fhyfo%2Fman%2FgetLMom.Rd="11252CE5" +~%2FGitHub%2Fhyfo%2Fman%2FgetMoment.Rd="5F19AB1A" +~%2FGitHub%2Fhyfo%2Fman%2FgetNcdfVar.Rd="B8960C40" +~%2FGitHub%2Fhyfo%2Fman%2FgetPreciBar.Rd="EC53DD5E" +~%2FGitHub%2Fhyfo%2Fman%2FgetPreciBar_comb.Rd="614F6C89" +~%2FGitHub%2Fhyfo%2Fman%2FgetSpatialMap.Rd="93C6AB2B" +~%2FGitHub%2Fhyfo%2Fman%2FgetSpatialMap_comb.Rd="581924DB" +~%2FGitHub%2Fhyfo%2Fman%2FgetSpatialMap_mat.Rd="8F604BF1" +~%2FGitHub%2Fhyfo%2Fman%2Flist2Dataframe.Rd="81E308C8" +~%2FGitHub%2Fhyfo%2Fman%2FloadNcdf.Rd="E2A56787" +~%2FGitHub%2Fhyfo%2Fman%2FplotTS.Rd="4F48C490" +~%2FGitHub%2Fhyfo%2Fman%2FplotTS_comb.Rd="E0A1BF84" +~%2FGitHub%2Fhyfo%2Fman%2Fresample.Rd="D1BE3A89" +~%2FGitHub%2Fhyfo%2Fman%2Fshp2cat.Rd="619E744A" +~%2FGitHub%2Fhyfo%2Fman%2Ftestdl.Rd="7C28B417" +~%2FGitHub%2Fhyfo%2Fman%2FtgridData.Rd="E538DE4" +~%2FGitHub%2Fhyfo%2Fman%2FwriteNcdf.Rd="AD39FF43" +~%2FGitHub%2Fhyfo%2Fvignettes%2Fhyfo.Rmd="62BD4C03" diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/12499DD8 b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/12499DD8 new file mode 100644 index 0000000..564dc6b --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/12499DD8 @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "% Generated by roxygen2: do not edit by hand\n% Please edit documentation in R/ncdf.R\n\\name{writeNcdf}\n\\alias{writeNcdf}\n\\title{Write to NetCDF file using hyfo list file}\n\\usage{\nwriteNcdf(gridData, filePath, 
missingValue = 1e+20, tz = \"GMT\",\n units = NULL, version = 3)\n}\n\\arguments{\n\\item{gridData}{A hyfo list file from \\code{\\link{loadNcdf}}}\n\n\\item{filePath}{A path of the new NetCDF file, should end with \".nc\"}\n\n\\item{missingValue}{A number representing the missing value in the NetCDF file, default\nis 1e20\n#' @param tz A string representing the time zone, default is GMT, if you know what time zone is \nyou can assign it in the argument. If \\code{tz = ''}, current time zone will be taken.}\n\n\\item{tz}{time zone, default is \"GMT\"}\n\n\\item{units}{A string showing in which unit you are putting in the NetCDF file, it can be \nseconds or days and so on. If not specified, the function will pick up the possible largest \ntime units from \\code{c('weeks', 'days', 'hours', 'mins', 'secs')}}\n\n\\item{version}{ncdf file versions, default is 3, if 4 is chosen, output file will be foreced to version 4.}\n}\n\\value{\nAn NetCDF version 3 file.\n}\n\\description{\nWrite to NetCDF file using hyfo list file\n}\n\\examples{\n# First open the test NETcDF file.\nfilePath <- system.file(\"extdata\", \"tnc.nc\", package = \"hyfo\")\n\n\n# Then if you don't know the variable name, you can use \\\\code{getNcdfVar} to get variable name\nvarname <- getNcdfVar(filePath)\n\nnc <- loadNcdf(filePath, varname)\n\n# Then write to your work directory\n\nwriteNcdf(nc, 'test.nc')\n\n# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n\n}\n\\references{\n\\itemize{\n\\item David Pierce (2015). ncdf4: Interface to Unidata netCDF (Version 4 or\nEarlier) Format Data Files. R package version 1.14.1.\nhttps://CRAN.R-project.org/package=ncdf4\n\n\\item Santander MetGroup (2015). ecomsUDG.Raccess: R interface to the ECOMS User Data Gateway. R package\nversion 2.2-6. 
http://meteo.unican.es/ecoms-udg\n\n}\n}\n\n", + "created" : 1488015015739.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "1114700098", + "id" : "12499DD8", + "lastKnownWriteTime" : 1488018205, + "last_content_update" : 1488018205, + "path" : "~/GitHub/hyfo/man/writeNcdf.Rd", + "project_path" : "man/writeNcdf.Rd", + "properties" : { + }, + "relative_order" : 49, + "source_on_save" : false, + "source_window" : "", + "type" : "r_doc" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/1F14F77D b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/1F14F77D new file mode 100644 index 0000000..1669eae --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/1F14F77D @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "% Generated by roxygen2: do not edit by hand\n% Please edit documentation in R/shp2cat.R\n\\name{shp2cat}\n\\alias{shp2cat}\n\\title{Get a catchment object from selected shape file.}\n\\usage{\nshp2cat(filePath)\n}\n\\arguments{\n\\item{filePath}{A string representing the path of the shape file.}\n}\n\\value{\nA catchment object can be used in \\code{getSpatialMap()}.\n}\n\\description{\nGet a catchment object from selected shape file.\n}\n\\details{\nThis function is based on the package \\code{rgdal} and \\code{sp}, and the output comes from the package \n\\code{sp}\n}\n\\examples{\n#open internal file\nfile <- system.file(\"extdata\", \"testCat.shp\", package = \"hyfo\")\ncatchment <- shp2cat(file)\n\n# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n\n}\n\\references{\n\\itemize{\n\\item Roger Bivand, Tim Keitt and Barry Rowlingson (2015). rgdal: Bindings for the Geospatial Data\nAbstraction Library. R package version 1.0-4. https://CRAN.R-project.org/package=rgdal\n\n\\item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for\nStatistical Computing, Vienna, Austria. 
URL https://www.R-project.org/.\n}\n}\n\n", + "created" : 1488014912691.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "1199630483", + "id" : "1F14F77D", + "lastKnownWriteTime" : 1488018205, + "last_content_update" : 1488018205, + "path" : "~/GitHub/hyfo/man/shp2cat.Rd", + "project_path" : "man/shp2cat.Rd", + "properties" : { + }, + "relative_order" : 47, + "source_on_save" : false, + "source_window" : "", + "type" : "r_doc" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/2A6E2BEA b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/2A6E2BEA new file mode 100644 index 0000000..1fb51f4 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/2A6E2BEA @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "\n\n\n#' Biascorrect the input timeseries or hyfo dataset\n#' \n#' Biascorrect the input time series or dataset, the input time series or dataset should consist of observation, hindcast, and forecast.\n#' observation and hindcast should belong to the same period, in order to calibrate. Then the modified forecast\n#' will be returned. If the input is a time series, first column should be date column and rest columns should be \n#' the value column. If the input is a hyfo dataset, the dataset should be the result of \\code{loadNcdf}, or a list\n#' file with the same format. \n#' \n#' \n#' @param frc a hyfo grid data output or a dataframe (time series) consists of Date column and one or more value columns, \n#' representing the forecast to be calibrated.\n#' @param hindcast a hyfo grid data output or a dataframe(time series) consists of Date column and one or more value columns, \n#' representing the hindcast data. This data will be used in the calibration of the forecast, so it's better to have the same date period as\n#' observation data. 
Check details for more information.\n#' @param obs a hyfo grid data output or a dataframe (time series) consists of Date column and one or more value columns, \n#' representing the observation data.\n#' @param method bias correct method, including 'delta', 'scaling'..., default is 'scaling'\n#' @param scaleType only when the method \"scaling\" is chosen, scaleType will be available. Two different types\n#' of scaling method, 'add' and 'multi', which means additive and multiplicative scaling method. More info check \n#' details. Default scaleType is 'multi'.\n#' @param preci If the precipitation is biascorrected, then you have to assign \\code{preci = TRUE}. Since for\n#' precipitation, some biascorrect methods may not apply to, or some methods are specially for precipitation. \n#' Default is FALSE, refer to details.\n#' @param prThreshold The minimum value that is considered as a non-zero precipitation. Default to 1 (assuming mm).\n#' @param extrapolate When use 'eqm' method, and 'no' is set, modified frc is bounded by the range of obs.\n#' If 'constant' is set, modified frc is not bounded by the range of obs. Default is 'no'.\n#' @details \n#' \n#' Since climate forecast is based on global condition, when downscaling to different regions, it may include\n#' some bias, biascorrection is used then to fix the bias.\n#' \n#' \\strong{Hindcast}\n#' \n#' In order to bias correct, we need to pick up some data from the forecast to train with\n#' the observation, which is called hindcast in this function. Using hindcast and observation, \n#' the program can analyze the bias and correct the bias in the forecast. \n#' \n#' Hindcast should have \\strong{EVERY} attributes that forecast has.\n#' \n#' Hindcast is also called re-forecast, is the forecast of the past. E.g. you have a forecast from year 2000-2010, assuming now you are in 2005. 
So from 2000-2005, this period\n#' is the hindcast period, and 2005-2010, this period is the forecast period.\n#'\n#' Hindcast can be the same as forecast, i.e., you can use forecast itself as hindcast to train the bias correction.\n#'\n#'\n#' \\strong{How it works}\n#' \n#' Forecast product has to be calibrated, usually the system is doing forecast in real time. So, e.g., if the \n#' forecast starts from year 2000, assuming you are in year 2003, then you will have 3 years' hindcast \n#' data (year 2000-2003), which can be used to calibrate. And your forecast period is (2003-2004)\n#' \n#' E.g. you have observation from 2001-2002, this is your input obs. Then you can take the same \n#' period (2001-2002) from the forecast, which is the hindcast period. For forecast, you can take any period.\n#' The program will evaluate the obs and hindcast, to get the modification of the forecast, and then add the \n#' modification to the forecast data.\n#' \n#' The more categorized input, the more accurate result you will get. E.g., if you want to \n#' bias correct a forecast for winter season. So you'd better to extract all the winter period\n#' in the hindcast and observation to train. \\code{extractPeriod} can be used for this purpose.\n#' \n#' \\strong{method}\n#' \n#' Different methods used in the bias correction. Among which, delta, scaling can be applied\n#' to different kinds of parameters, with no need to set \\code{preci}; eqm has two conditions for rainfall data and other data,\n#' it needs user to input \\code{preci = TRUE/FALSE} to point to different conditions; gqm is\n#' designed for rainfall data, so \\code{preci = TRUE} needs to be set.\n#' \n#' \\strong{delta}\n#' \n#' This method consists on adding to the observations the mean change signal (delta method). \n#' This method is applicable to any kind of variable but it is preferable to avoid it for bounded variables\n#' (e.g. precipitation, wind speed, etc.) 
because values out of the variable range could be obtained \n#' (e.g. negative wind speeds...)\n#' \n#' \\strong{scaling}\n#' \n#' This method consists on scaling the simulation with the difference (additive) or quotient (multiplicative) \n#' between the observed and simulated means in the train period. The \\code{additive} or \\code{multiplicative}\n#' correction is defined by parameter \\code{scaling.type} (default is \\code{additive}).\n#' The additive version is preferably applicable to unbounded variables (e.g. temperature) \n#' and the multiplicative to variables with a lower bound (e.g. precipitation, because it also preserves the frequency). \n#' \n#' \\strong{eqm}\n#' \n#' Empirical Quantile Mapping. This is a very extended bias correction method which consists on calibrating the simulated Cumulative Distribution Function (CDF) \n#' by adding to the observed quantiles both the mean delta change and the individual delta changes in the corresponding quantiles. \n#' This method is applicable to any kind of variable.\n#' \n#' It can keep the extreme value, if you choose constant extrapolation method. But then you will face the risk\n#' that the extreme value is an error.\n#' \n#' \\strong{gqm}\n#' \n#' Gamma Quantile Mapping. This method is described in Piani et al. 2010 and is applicable only to precipitation. It is based on the initial assumption that both observed\n#' and simulated intensity distributions are well approximated by the gamma distribution, therefore is a parametric q-q map \n#' that uses the theorical instead of the empirical distribution. \n#' \n#' It can somehow filter some extreme values caused by errors, while keep the extreme value. 
Seems more reasonable.\n#' Better have a long period of training, and the if the forecast system is relatively stable.\n#' \n#' It is a generic function, if in your case you need to debug, please see \\code{?debug()} \n#' for how to debug S4 method.\n#' \n#' @examples \n#' \n#' ######## hyfo grid file biascorrection\n#' ########\n#' \n#' # If your input is obtained by \\code{loadNcdf}, you can also directly biascorrect\n#' # the file.\n#' \n#' # First load ncdf file.\n#' filePath <- system.file(\"extdata\", \"tnc.nc\", package = \"hyfo\")\n#' varname <- getNcdfVar(filePath) \n#' nc <- loadNcdf(filePath, varname)\n#' \n#' data(tgridData)\n#' # Since the example data, has some NA values, the process will include some warning #message, \n#' # which can be ignored in this case.\n#' \n#' \n#' \n#' \n#' # Then we will use nc data as forecasting data, and use itself as hindcast data,\n#' # use tgridData as observation.\n#' newFrc <- biasCorrect(nc, nc, tgridData) \n#' newFrc <- biasCorrect(nc, nc, tgridData, scaleType = 'add') \n#' newFrc <- biasCorrect(nc, nc, tgridData, method = 'eqm', extrapolate = 'constant', \n#' preci = TRUE) \n#' newFrc <- biasCorrect(nc, nc, tgridData, method = 'gqm', preci = TRUE) \n#' \n#' \n#' ######## Time series biascorrection\n#' ########\n#' \n#' # Use the time series from testdl as an example, we take frc, hindcast and obs from testdl.\n#' data(testdl)\n#' \n#' # common period has to be extracted in order to better train the forecast.\n#' \n#' datalist <- extractPeriod(testdl, startDate = '1994-1-1', endDate = '1995-10-1')\n#' \n#' frc <- datalist[[1]]\n#' hindcast <- datalist[[2]]\n#' obs <- datalist[[3]]\n#' \n#' \n#' # The data used here is just for example, so there could be negative data.\n#' \n#' # default method is scaling, with 'multi' scaleType\n#' frc_new <- biasCorrect(frc, hindcast, obs)\n#' \n#' # for precipitation data, extra process needs to be executed, so you have to tell\n#' # the program that it is a precipitation 
data.\n#' \n#' frc_new1 <- biasCorrect(frc, hindcast, obs, preci = TRUE)\n#' \n#' # You can use other scaling methods to biascorrect.\n#' frc_new2 <- biasCorrect(frc, hindcast, obs, scaleType = 'add')\n#' \n#' # \n#' frc_new3 <- biasCorrect(frc, hindcast, obs, method = 'eqm', preci = TRUE)\n#' frc_new4 <- biasCorrect(frc, hindcast, obs, method = 'gqm', preci = TRUE)\n#' \n#' plotTS(obs, frc, frc_new, frc_new1, frc_new2, frc_new3, frc_new4, plot = 'cum')\n#' \n#' # You can also give name to this input list.\n#' TSlist <- list(obs, frc, frc_new, frc_new1, frc_new2, frc_new3, frc_new4)\n#' names(TSlist) <- c('obs', 'frc', 'delta', 'delta_preci', 'scale', 'eqm', 'gqm')\n#' plotTS(list = TSlist, plot = 'cum')\n#' \n#' \n#' # If the forecasts you extracted only has incontinuous data for certain months and years, e.g.,\n#' # for seasonal forecasting, forecasts only provide 3-6 months data, so the case can be \n#' # for example Dec, Jan and Feb of every year from year 1999-2005.\n#' # In such case, you need to extract certain months and years from observed time series.\n#' # extractPeriod() can be then used.\n#' \n#' \n#'\n#'\n#'\n#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n#' \n#' \n#' @references \n#' Bias correction methods come from \\code{biasCorrection} from \\code{dowscaleR}\n#' \n#' \\itemize{\n#' \n#' \\item Santander Meteorology Group (2015). downscaleR: Climate data manipulation and statistical downscaling. R\n#' package version 0.6-0. https://github.com/SantanderMetGroup/downscaleR/wiki\n#' \n#' \\item R.A.I. Wilcke, T. Mendlik and A. Gobiet (2013) Multi-variable error correction of regional climate models. Climatic Change, 120, 871-887\n#' \n#' \\item A. Amengual, V. Homar, R. Romero, S. Alonso, and C. Ramis (2012) A Statistical Adjustment of Regional Climate Model Outputs to Local Scales: Application to Platja de Palma, Spain. J. Clim., 25, 939-957\n#' \n#' \\item C. Piani, J. O. Haerter and E. 
Coppola (2009) Statistical bias correction for daily precipitation in regional climate models over Europe, Theoretical and Applied Climatology, 99, 187-192\n#' \n#' \\item O. Gutjahr and G. Heinemann (2013) Comparing precipitation bias correction methods for high-resolution regional climate simulations using COSMO-CLM, Theoretical and Applied Climatology, 114, 511-529\n#' }\n#' \n#' @author Yuanchao Xu \\email{xuyuanchao37@@gmail.com }\n#' @importFrom methods setMethod\n#' @export\n#' \nsetGeneric('biasCorrect', function(frc, hindcast, obs, method = 'scaling', scaleType = 'multi', \n preci = FALSE, prThreshold = 0, extrapolate = 'no') {\n standardGeneric('biasCorrect')\n})\n\n\n# Since in new version of roxygen2, describeIn was changed, https://stackoverflow.com/questions/24246594/automatically-document-all-methods-of-an-s4-generic-using-roxygen2\n# so use rdname instead\n#' @rdname biasCorrect\n#' \nsetMethod('biasCorrect', signature('data.frame', 'data.frame', 'data.frame'),\n function(frc, hindcast, obs, method, scaleType, preci, prThreshold, extrapolate) {\n result <- biasCorrect.TS(frc, hindcast, obs, method, scaleType, preci, prThreshold, extrapolate)\n return(result)\n })\n\n#' @rdname biasCorrect\nsetMethod('biasCorrect', signature('list', 'list', 'list'), \n function(frc, hindcast, obs, method, scaleType, preci, prThreshold, extrapolate) {\n result <- biasCorrect.list(frc, hindcast, obs, method, scaleType, preci, prThreshold, extrapolate)\n return(result)\n })\n\n\nbiasCorrect.TS <- function(frc, hindcast, obs, method, scaleType, preci, prThreshold, extrapolate) {\n # First check if the first column is Date\n if (!grepl('-|/', obs[1, 1]) | !grepl('-|/', hindcast[1, 1]) | !grepl('-|/', frc[1, 1])) {\n stop('First column is not date or Wrong Date formate, check the format in ?as.Date{base} \n and use as.Date to convert.If your input is a hyfo dataset, put input = \"hyfo\" as an\n argument, check help for more info.')\n }\n # change to date type is easier, 
but in case in future the flood part is added, Date type doesn't have\n # hour, min and sec, so, it's better to convert it into POSIxlt.\n \n # if condition only accepts one condition, for list comparison, there are a lot of conditions, better\n # further process it, like using any.\n if (any(as.POSIXlt(hindcast[, 1]) != as.POSIXlt(obs[, 1]))) {\n warning('time of obs and time of hindcast are not the same, which may cause inaccuracy in \n the calibration.')\n }\n n <- ncol(frc)\n \n # For every column, it's biascorrected respectively.\n frc_data <- lapply(2:n, function(x) biasCorrect_core(frc[, x], hindcast[, x], obs[, 2], method = method,\n scaleType = scaleType, preci = preci, prThreshold = prThreshold, \n extrapolate = extrapolate))\n frc_data <- do.call('cbind', frc_data)\n rownames(frc_data) <- NULL\n \n names <- colnames(frc)\n frc_new <- data.frame(frc[, 1], frc_data)\n colnames(frc_new) <- names\n return(frc_new)\n}\n\nbiasCorrect.list <- function(frc, hindcast, obs, method, scaleType, preci, prThreshold, extrapolate) {\n ## Check if the data is a hyfo grid data.\n checkHyfo(frc, hindcast, obs)\n \n hindcastData <- hindcast$Data\n obsData <- obs$Data\n frcData <- frc$Data\n \n ## save frc dimension order, at last, set the dimension back to original dimension\n frcDim <- attributes(frcData)$dimensions\n \n ## ajust the dimension into general dimension order.\n hindcastData <- adjustDim(hindcastData, ref = c('lon', 'lat', 'time'))\n obsData <- adjustDim(obsData, ref = c('lon', 'lat', 'time'))\n \n ## CheckDimLength, check if all the input dataset has different dimension length\n # i.e. 
if they all have the same lon and lat number.\n checkDimLength(frcData, hindcastData, obsData, dim = c('lon', 'lat'))\n \n \n # Now real bias correction is executed.\n \n memberIndex <- grepAndMatch('member', attributes(frcData)$dimensions)\n \n # For dataset that has a member part \n if (length(memberIndex) != 0) {\n # check if frcData and hindcastData has the same dimension and length.\n checkDimLength(frcData, hindcastData, dim = 'member')\n \n frcData <- adjustDim(frcData, ref = c('lon', 'lat', 'time', 'member'))\n \n # The following code may speed up because it doesn't use for loop.\n # It firstly combine different array into one array. combine the time \n # dimension of frc, hindcast and obs. Then use apply, each time extract \n # the total time dimension, and first part is frc, second is hindcast, third\n # is obs. Then use these three parts to bias correct. All above can be written\n # in one function and called within apply. But too complicated to understand,\n # So save it for future use maybe.\n \n # for (member in 1:dim(frcData)[4]) {\n # totalArr <- array(c(frcData[,,, member], hindcastData[,,, member], obsData),\n # dim = c(dim(frcData)[1], dim(frcData)[2], \n # dim(frcData)[3] + dim(hindcastData)[3] + dim(obsData)[3]))\n # }\n \n \n for (member in 1:dim(frcData)[4]) {\n for (lon in 1:dim(frcData)[1]) {\n for (lat in 1:dim(frcData)[2]) {\n frcData[lon, lat,, member] <- biasCorrect_core(frcData[lon, lat,,member], hindcastData[lon, lat,, member], obsData[lon, lat,], method = method,\n scaleType = scaleType, preci = preci, prThreshold = prThreshold, \n extrapolate = extrapolate)\n }\n }\n }\n } else {\n frcData <- adjustDim(frcData, ref = c('lon', 'lat', 'time'))\n for (lon in 1:dim(frcData)[1]) {\n for (lat in 1:dim(frcData)[2]) {\n frcData[lon, lat,] <- biasCorrect_core(frcData[lon, lat,], hindcastData[lon, lat,], obsData[lon, lat,], method = method,\n scaleType = scaleType, preci = preci, prThreshold = prThreshold, \n extrapolate = extrapolate)\n }\n 
}\n }\n \n frcData <- adjustDim(frcData, ref = frcDim)\n frc$Data <- frcData\n frc$biasCorrected_by <- method\n frc_new <- frc\n return(frc_new)\n}\n\n\n\n\n\n\n#' @importFrom MASS fitdistr\n#' @importFrom stats ecdf quantile pgamma qgamma rgamma\n#' \n#' @references \n#' Bias correction methods come from \\code{biasCorrection} from \\code{dowscaleR}\n#' \n#' \\itemize{\n#' \n#' \\item Santander Meteorology Group (2015). downscaleR: Climate data manipulation and statistical downscaling. R\n#' package version 0.6-0. https://github.com/SantanderMetGroup/downscaleR/wiki\n#' \n#' \\item R.A.I. Wilcke, T. Mendlik and A. Gobiet (2013) Multi-variable error correction of regional climate models. Climatic Change, 120, 871-887\n#' \n#' \\item A. Amengual, V. Homar, R. Romero, S. Alonso, and C. Ramis (2012) A Statistical Adjustment of Regional Climate Model Outputs to Local Scales: Application to Platja de Palma, Spain. J. Clim., 25, 939-957\n#' \n#' \\item C. Piani, J. O. Haerter and E. Coppola (2009) Statistical bias correction for daily precipitation in regional climate models over Europe, Theoretical and Applied Climatology, 99, 187-192\n#' \n#' \\item O. Gutjahr and G. Heinemann (2013) Comparing precipitation bias correction methods for high-resolution regional climate simulations using COSMO-CLM, Theoretical and Applied Climatology, 114, 511-529\n#' }\n#' \n#' \n#' \n# this is only used to calculate the value column, \nbiasCorrect_core <- function(frc, hindcast, obs, method, scaleType, preci, prThreshold, extrapolate){\n # If the variable is precipitation, some further process needs to be added.\n # The process is taken from downscaleR, to provide a more reasonable hindcast, used in the calibration.\n \n \n # check if frc, hindcast or obs are all na values\n if (!any(!is.na(obs)) | !any(!is.na(frc)) | !any(!is.na(hindcast))) {\n warning('In this cell, frc, hindcast or obs data is missing. 
No biasCorrection for this cell.')\n return(NA)\n }\n \n \n if (preci == TRUE) {\n preprocessHindcast_res <- preprocessHindcast(hindcast = hindcast, obs = obs, prThreshold = prThreshold)\n hindcast <- preprocessHindcast_res[[1]]\n minHindcastPreci <- preprocessHindcast_res[[2]]\n }\n \n # default is the simplest method in biascorrection, just do simple addition and subtraction.\n if (method == 'delta') {\n if (length(frc) != length(obs)) stop('This method needs frc data have the same length as obs data.')\n # comes from downscaleR biascorrection method\n frcMean <- mean(frc, na.rm = TRUE)\n hindcastMean <- mean(hindcast, na.rm = TRUE)\n frc <- obs - hindcastMean + frcMean\n \n } else if (method == 'scaling') {\n obsMean <- mean(obs, na.rm = TRUE)\n hindcastMean <- mean(hindcast, na.rm = TRUE)\n \n if (scaleType == 'multi') {\n frc <- frc / hindcastMean * obsMean\n \n } else if (scaleType == 'add') {\n frc <- frc - hindcastMean + obsMean\n }\n \n \n } else if (method == 'eqm') {\n if (preci == FALSE) {\n frc <- biasCorrect_core_eqm_nonPreci(frc, hindcast, obs, extrapolate, prThreshold)\n } else {\n frc <- biasCorrect_core_eqm_preci(frc, hindcast, obs, minHindcastPreci, extrapolate,\n prThreshold)\n }\n \n } else if (method == 'gqm') {\n if (preci == FALSE) stop ('gqm method only applys to precipitation, please set preci = T')\n frc <- biasCorrect_core_gqm(frc, hindcast, obs, prThreshold, minHindcastPreci)\n }\n \n \n return(frc)\n}\n\n\n#' @importFrom MASS fitdistr\n#' @importFrom stats rgamma\npreprocessHindcast <- function(hindcast, obs, prThreshold) {\n lowerIndex <- length(which(obs < prThreshold))\n \n # In the original function, this minHindcastPreci is Pth[,i,j] in downscaleR, and it is originally\n # set to NA, which is not so appropriate for all the precipitations.\n # In the original function, there are only two conditions, 1. all the obs less than threshold\n # 2. there are some obs less than threshold. 
\n # While, if we set threshold to 0, there could be a 3rd condition, all the obs no less than threshold.\n # Here I set this situation, firstly set minHindcastPreci to the min of the hindcast. Because in future\n # use, 'eqm' method is going to use this value.\n \n # The problem above has been solved.\n \n \n if (lowerIndex >= 0 & lowerIndex < length(obs)) {\n index <- sort(hindcast, decreasing = FALSE, na.last = NA, index.return = TRUE)$ix\n hindcast_sorted <- sort(hindcast, decreasing = FALSE, na.last = NA)\n # minHindcastPreci is the min preci over threshold FOR ***HINDCAST***\n # But use obs to get the lowerIndex, so obs_sorted[lowerIndex + 1] > prThreshold, but\n # hindcast_sorted[lowerIndex + 1] may greater than or smaller than ptThreshold\n \n \n # It would be better to understand if you draw two lines: hindcast_sorted and obs_sorted\n # with y = prThreshold, you will find the difference of the two.\n \n # In principle, the value under the threshold needs to be replaced by some other reasonable value.\n # simplest way \n minHindcastPreci <- hindcast_sorted[lowerIndex + 1]\n \n # Also here if minHindcastPreci is 0 and prThreshold is 0, will cause problem, bettter set \n # I set it prThreshold != 0 \n if (minHindcastPreci <= prThreshold & prThreshold != 0) {\n obs_sorted <- sort(obs, decreasing = FALSE, na.last = NA)\n \n # higherIndex is based on hindcast\n higherIndex <- which(hindcast_sorted > prThreshold & !is.na(hindcast_sorted))\n \n if (length(higherIndex) == 0) {\n higherIndex <- max(which(!is.na(hindcast_sorted)))\n higherIndex <- min(length(obs_sorted), higherIndex)\n } else {\n higherIndex <- min(higherIndex)\n }\n # here I don't know why choose 6.\n # Written # [Shape parameter Scale parameter] in original package\n # according to the reference and gamma distribution, at least 6 values needed to fit gamma\n # distribution.\n if (length(unique(obs_sorted[(lowerIndex + 1):higherIndex])) < 6) {\n hindcast_sorted[(lowerIndex + 1):higherIndex] <- 
mean(obs_sorted[(lowerIndex + 1):higherIndex], \n na.rm = TRUE)\n } else {\n obsGamma <- fitdistr(obs_sorted[(lowerIndex + 1):higherIndex], \"gamma\")\n \n # this is to replace the original hindcast value between lowerIndex and higherIndex with \n # some value taken from gamma distribution just generated.\n hindcast_sorted[(lowerIndex + 1):higherIndex] <- rgamma(higherIndex - lowerIndex, obsGamma$estimate[1], \n rate = obsGamma$estimate[2])\n }\n hindcast_sorted <- sort(hindcast_sorted, decreasing = FALSE, na.last = NA)\n \n } \n minIndex <- min(lowerIndex, length(hindcast))\n hindcast_sorted[1:minIndex] <- 0\n hindcast[index] <- hindcast_sorted\n \n } else if (lowerIndex == length(obs)) {\n \n index <- sort(hindcast, decreasing = FALSE, na.last = NA, index.return = TRUE)$ix\n hindcast_sorted <- sort(hindcast, decreasing = FALSE, na.last = NA)\n minHindcastPreci <- hindcast_sorted[lowerIndex]\n \n # here is to compare with hindcast, not obs\n minIndex <- min(lowerIndex, length(hindcast))\n hindcast_sorted[1:minIndex] <- 0\n hindcast[index] <- hindcast_sorted\n \n }\n return(list(hindcast, minHindcastPreci))\n}\n\nbiasCorrect_core_eqm_nonPreci <- function(frc, hindcast, obs, extrapolate, prThreshold) {\n ecdfHindcast <- ecdf(hindcast)\n \n if (extrapolate == 'constant') {\n higherIndex <- which(frc > max(hindcast, na.rm = TRUE))\n lowerIndex <- which(frc < min(hindcast, na.rm = TRUE))\n \n extrapolateIndex <- c(higherIndex, lowerIndex)\n non_extrapolateIndex <- setdiff(1:length(frc), extrapolateIndex)\n \n # In case extrapolateIndex is of length zero, than extrapolate cannot be used afterwards\n # So use setdiff(1:length(sim), extrapolateIndex), if extrapolateIndex == 0, than it will\n # return 1:length(sim)\n \n if (length(higherIndex) > 0) {\n maxHindcast <- max(hindcast, na.rm = TRUE)\n dif <- maxHindcast - max(obs, na.rm = TRUE)\n frc[higherIndex] <- frc[higherIndex] - dif\n }\n \n if (length(lowerIndex) > 0) {\n minHindcast <- min(hindcast, na.rm = TRUE)\n dif 
<- minHindcast - min(obs, nna.rm = TRUE)\n frc[lowerIndex] <- frc[lowerIndex] - dif\n }\n \n frc[non_extrapolateIndex] <- quantile(obs, probs = ecdfHindcast(frc[non_extrapolateIndex]), \n na.rm = TRUE, type = 4)\n } else {\n frc <- quantile(obs, probs = ecdfHindcast(frc), na.rm = TRUE, type = 4)\n }\n return(frc)\n}\n\nbiasCorrect_core_eqm_preci <- function(frc, hindcast, obs, minHindcastPreci, extrapolate, \n prThreshold) {\n \n # Most of time this condition seems useless because minHindcastPreci comes from hindcast, so there will be\n # always hindcast > minHindcastPreci exists.\n # Unless one condition that minHindcastPreci is the max in the hindcast, than on hindcast > minHindcastPreci\n if (length(which(hindcast > minHindcastPreci)) > 0) {\n \n ecdfHindcast <- ecdf(hindcast[hindcast > minHindcastPreci])\n \n noRain <- which(frc <= minHindcastPreci & !is.na(frc))\n rain <- which(frc > minHindcastPreci & !is.na(frc))\n \n # drizzle is to see whether there are some precipitation between the min frc (over threshold) and \n # min hindcast (over threshold).\n drizzle <- which(frc > minHindcastPreci & frc <= min(hindcast[hindcast > minHindcastPreci], na.rm = TRUE) \n & !is.na(frc))\n \n if (length(rain) > 0) {\n ecdfFrc <- ecdf(frc[rain])\n \n if (extrapolate == 'constant') {\n \n # This higher and lower index mean the extrapolation part\n higherIndex <- which(frc[rain] > max(hindcast, na.rm = TRUE))\n lowerIndex <- which(frc[rain] < min(hindcast, na.rm = TRUE))\n \n extrapolateIndex <- c(higherIndex, lowerIndex)\n non_extrapolateIndex <- setdiff(1:length(rain), extrapolateIndex)\n \n if (length(higherIndex) > 0) {\n maxHindcast <- max(hindcast, na.rm = TRUE)\n dif <- maxHindcast - max(obs, na.rm = TRUE)\n frc[rain[higherIndex]] <- frc[higherIndex] - dif\n }\n \n if (length(lowerIndex) > 0) {\n minHindcast <- min(hindcast, na.rm = TRUE)\n dif <- minHindcast - min(obs, nna.rm = TRUE)\n frc[rain[lowerIndex]] <- frc[lowerIndex] - dif\n }\n \n # Here the original 
function doesn't accout for the situation that extraploateIndex is 0\n # if it is 0, rain[-extraploateIndex] would be nothing\n \n # Above has been solved by using setdiff.\n frc[rain[non_extrapolateIndex]] <- quantile(obs[which(obs > prThreshold & !is.na(obs))], \n probs = ecdfHindcast(frc[rain[non_extrapolateIndex]]), \n na.rm = TRUE, type = 4)\n } else {\n \n frc[rain] <- quantile(obs[which(obs > prThreshold & !is.na(obs))], \n probs = ecdfHindcast(frc[rain]), na.rm = TRUE, type = 4)\n }\n }\n if (length(drizzle) > 0){\n \n # drizzle part is a seperate part. it use the ecdf of frc (larger than minHindcastPreci) to \n # biascorrect the original drizzle part\n frc[drizzle] <- quantile(frc[which(frc > min(hindcast[which(hindcast > minHindcastPreci)], na.rm = TRUE) & \n !is.na(frc))], probs = ecdfFrc(frc[drizzle]), na.rm = TRUE, \n type = 4)\n }\n \n frc[noRain] <- 0\n \n } else {\n # in this condition minHindcastPreci is the max of hindcast, so all hindcast <= minHindcastPreci\n # And frc distribution is used then.\n noRain <- which(frc <= minHindcastPreci & !is.na(frc))\n rain <- which(frc > minHindcastPreci & !is.na(frc))\n \n if (length(rain) > 0) {\n ecdfFrc <- ecdf(frc[rain])\n frc[rain] <- quantile(obs[which(obs > prThreshold & !is.na(obs))], probs = ecdfFrc(frc[rain]), \n na.rm = TRUE, type = 4)\n }\n frc[noRain]<-0\n }\n return(frc)\n}\n\nbiasCorrect_core_gqm <- function(frc, hindcast, obs, prThreshold, minHindcastPreci) {\n if (any(obs > prThreshold)) {\n \n ind <- which(obs > prThreshold & !is.na(obs))\n obsGamma <- fitdistr(obs[ind],\"gamma\")\n ind <- which(hindcast > 0 & !is.na(hindcast))\n hindcastGamma <- fitdistr(hindcast[ind],\"gamma\")\n rain <- which(frc > minHindcastPreci & !is.na(frc))\n noRain <- which(frc <= minHindcastPreci & !is.na(frc))\n \n probF <- pgamma(frc[rain], hindcastGamma$estimate[1], rate = hindcastGamma$estimate[2])\n frc[rain] <- qgamma(probF,obsGamma$estimate[1], rate = obsGamma$estimate[2])\n frc[noRain] <- 0\n } else {\n 
warning('All the observations of this cell(station) are lower than the threshold, \n no bias correction applied.')\n }\n return(frc)\n }\n", + "created" : 1483875773075.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "3009424597", + "id" : "2A6E2BEA", + "lastKnownWriteTime" : 1488015187, + "last_content_update" : 1488015187937, + "path" : "~/GitHub/hyfo/R/biasCorrect(generic).R", + "project_path" : "R/biasCorrect(generic).R", + "properties" : { + }, + "relative_order" : 2, + "source_on_save" : false, + "source_window" : "", + "type" : "r_source" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/2AC49E50 b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/2AC49E50 new file mode 100644 index 0000000..f955053 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/2AC49E50 @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "% Generated by roxygen2: do not edit by hand\n% Please edit documentation in R/ncdf.R\n\\name{downscaleNcdf}\n\\alias{downscaleNcdf}\n\\title{Downscale NetCDF file}\n\\usage{\ndownscaleNcdf(gridData, year = NULL, month = NULL, lon = NULL,\n lat = NULL)\n}\n\\arguments{\n\\item{gridData}{A hyfo list file from \\code{\\link{loadNcdf}}}\n\n\\item{year}{A vector of the target year. e.g. \\code{year = 2000}, \\code{year = 1980:2000}}\n\n\\item{month}{A vector of the target month. e.g. \\code{month = 2}, \\code{month = 3:12}}\n\n\\item{lon}{A vector of the range of the downscaled longitude, should contain a max value\nand a min value. e.g. \\code{lon = c(-1.5, 2,5)}}\n\n\\item{lat}{A vector of the range of the downscaled latitude, should contain a max value\nand a min value. e.g. 
\\code{lat = c(32,2, 36)}}\n}\n\\value{\nA downscaled hyfo list file.\n}\n\\description{\nDownscale NetCDF file\n}\n\\examples{\n# First open the test NETcDF file.\nfilePath <- system.file(\"extdata\", \"tnc.nc\", package = \"hyfo\")\n\n\n# Then if you don't know the variable name, you can use \\\\code{getNcdfVar} to get variable name\nvarname <- getNcdfVar(filePath)\n\nnc <- loadNcdf(filePath, varname)\n\n# Then write to your work directory\n\nnc1 <- downscaleNcdf(nc, year = 2006, lon = c(-2, -0.5), lat = c(43.2, 43.7))\nnc2 <- downscaleNcdf(nc, year = 2005, month = 3:8, lon = c(-2, -0.5), lat = c(43.2, 43.7))\n\n# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n\n}\n\\references{\n\\itemize{\n\n\\item Santander MetGroup (2015). ecomsUDG.Raccess: R interface to the ECOMS User Data Gateway. R package\nversion 2.2-6. http://meteo.unican.es/ecoms-udg\n}\n}\n\n", + "created" : 1487956113315.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "2788518444", + "id" : "2AC49E50", + "lastKnownWriteTime" : 1488018205, + "last_content_update" : 1488018205, + "path" : "~/GitHub/hyfo/man/downscaleNcdf.Rd", + "project_path" : "man/downscaleNcdf.Rd", + "properties" : { + }, + "relative_order" : 26, + "source_on_save" : false, + "source_window" : "", + "type" : "r_doc" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/2F3179D4 b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/2F3179D4 new file mode 100644 index 0000000..02e91e4 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/2F3179D4 @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "# Sample .travis.yml for R projects\n\nlanguage: r\nwarnings_are_errors: true\nsudo: required\ndist: precise\ngroup: edge\nenv:\n global:\n - NOT_CRAN = true\nbefore_install: \n echo \"options(repos = c(CRAN='https://cran.rstudio.com'))\" > ~/.Rprofile\n# - sudo apt-get autoclean\n# - sudo aptitude install libgdal-dev\napt_packages:\n - libnetcdf-dev\n - 
udunits-bin\n - libudunits2-dev\n - netcdf-bin\n# - libproj-dev\n# - libcurl4-gnutls-dev\n# - libdap-dev\n# - libgdal-dev\n# - libgdal1h\n# - libhdf5-7=1.8.11-3ubuntu1~precise1~ppa1\n# - libhdf5-dev\n# - libhdf5-serial-dev\n# - libgdal-dev\n# - libgdal1-dev\nr_binary_packages:\n - rgdal\n - rgeos\n", + "created" : 1487955784861.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "385747178", + "id" : "2F3179D4", + "lastKnownWriteTime" : 1487955804, + "last_content_update" : 1487955804585, + "path" : "~/GitHub/hyfo/.travis.yml", + "project_path" : ".travis.yml", + "properties" : { + }, + "relative_order" : 20, + "source_on_save" : false, + "source_window" : "", + "type" : "yaml" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/303058FC b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/303058FC new file mode 100644 index 0000000..7cec6da --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/303058FC @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "## For package updates information\n\n#' @importFrom utils packageDescription\nhyfoUpdates <- function(){\n page <- readLines('https://yuanchao-xu.github.io/hyfo/')\n updatesLine <- grep('id=\\\\\"updates\"', page)\n versionLine <- updatesLine + 2\n \n version <- unlist(strsplit(page[versionLine], split = ' '))[2]\n version_local <- packageDescription(\"hyfo\")$Version\n \n \n # the first tow digit is the most important part of the version\n version12 <- unlist(strsplit(version, split = \"[.]\"))[1:2]\n version_local12 <- unlist(strsplit(version_local, split = \"[.]\"))[1:2]\n \n sameVersion <- version12 == version_local12\n \n if (any(sameVersion == FALSE)) {\n # generate message\n version_msg <- strsplit(strsplit(page[versionLine], split = '

')[[1]][2], split = '

')[[1]]\n infoLine_start <- versionLine + 2\n infoLine_end <- grep('

For historical releases and the introduction of updates about each version', page) - 1\n info_msg <- character()\n for (infoLine in infoLine_start:infoLine_end) {\n info_line <- strsplit(strsplit(page[infoLine], split = '>')[[1]][2], split = '<')[[1]][1]\n if (!is.na(info_line)) info_msg <- c(info_msg, info_line)\n }\n \n install_msg <- 'More details on https://yuanchao-xu.github.io/hyfo/'\n \n message_out <- paste(version_msg, paste(info_msg, collapse = '\\n'), install_msg, sep = '\\n')\n } else message_out <- NULL\n return(message_out)\n}\n\n.onAttach <- function(libname, pkgname) {\n message_out <- suppressWarnings(try(hyfoUpdates(), silent = TRUE))\n if (!is.null(message_out)) {\n if (grepl('Version', message_out)) {\n packageStartupMessage(message_out)\n }\n }\n}\n", + "created" : 1488018121300.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "1326432164", + "id" : "303058FC", + "lastKnownWriteTime" : 1488018146, + "last_content_update" : 1488018146748, + "path" : "~/GitHub/hyfo/R/startup.R", + "project_path" : "R/startup.R", + "properties" : { + }, + "relative_order" : 54, + "source_on_save" : false, + "source_window" : "", + "type" : "r_source" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/3405765E b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/3405765E new file mode 100644 index 0000000..a5daf2c --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/3405765E @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "% Generated by roxygen2: do not edit by hand\n% Please edit documentation in R/ncdf.R\n\\name{loadNcdf}\n\\alias{loadNcdf}\n\\title{Load NetCDF file}\n\\usage{\nloadNcdf(filePath, varname, tz = \"GMT\", ...)\n}\n\\arguments{\n\\item{filePath}{A path pointing to the NetCDF file, version3.}\n\n\\item{varname}{A character representing the variable name, you can use \\code{getNcdfVar} to\nget the basic information about the variables and select the target.}\n\n\\item{tz}{A string representing the 
time zone, default is GMT, if you know what time zone is \nyou can assign it in the argument. If \\code{tz = ''}, current time zone will be taken.}\n\n\\item{...}{Several arguments including Year, month, lon, lat \ntype in \\code{?downscaleNcdf} for details.You can load while downscale, \nand also first load than use \\code{downscaleNcdf} to downscale.}\n}\n\\value{\nA list object from \\code{hyfo} containing the information to be used in the analysis, \nor biascorrection.\n}\n\\description{\nLoad NetCDF file\n}\n\\examples{\n# First open the test NETcDF file.\nfilePath <- system.file(\"extdata\", \"tnc.nc\", package = \"hyfo\")\n\n# Then if you don't know the variable name, you can use \\\\code{getNcdfVar} to get variable name\nvarname <- getNcdfVar(filePath)\n\nnc <- loadNcdf(filePath, varname)\n\n# you can directly add your downscale information to the argument.\nnc1 <- loadNcdf(filePath, varname, year = 2006, lon = c(-2, -0.5), lat = c(43.2, 43.7))\nnc2 <- loadNcdf(filePath, varname, year = 2005, month = 3:8, lon = c(-2, -0.5), \nlat = c(43.2, 43.7))\n\n# More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/\n\n}\n\\references{\n\\itemize{\n\\item David Pierce (2015). ncdf4: Interface to Unidata netCDF (Version 4 or\nEarlier) Format Data Files. R package version 1.14.1.\nhttps://CRAN.R-project.org/package=ncdf4\n\n\\item Santander MetGroup (2015). ecomsUDG.Raccess: R interface to the ECOMS User Data Gateway. R package\nversion 2.2-6. 
http://meteo.unican.es/ecoms-udg\n}\n}\n\n", + "created" : 1487956431502.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "431613459", + "id" : "3405765E", + "lastKnownWriteTime" : 1488018205, + "last_content_update" : 1488018205, + "path" : "~/GitHub/hyfo/man/loadNcdf.Rd", + "project_path" : "man/loadNcdf.Rd", + "properties" : { + }, + "relative_order" : 43, + "source_on_save" : false, + "source_window" : "", + "type" : "r_doc" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/390DEBE1 b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/390DEBE1 new file mode 100644 index 0000000..8a1514f --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/390DEBE1 @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "hyfo 1.3.9\n==========\nDate: 2017-2-20\n\n- apply data.table package to facilitate data processing\n\n\n\nhyfo 1.3.8\n==========\nDate: 2017-1-8\n\n- add changes to the new version of roxygen2\n- change biasCorrection's description, change default prThreshold to 0, since not every one is an expert and know how to set it, better keep the original unchanged.\n\n\n\nhyfo 1.3.7\n==========\nDate: 2016-3-1\n\n- add one more argument to plotTS, to cancel the marking of NA values. For some users, NA values are too many to be plotted.\n\n\n\nhyfo 1.3.6\n==========\nDate: 2015-12-15\n\n- transfer from ncdf to ncdf4\n- grepAndMatch created, for capturing dimension names.\n- minor bug fixed about the loadNcdf, when no dimension found, it will give an error indicating.\n- change most of the match function into grepAndMatch, in order to deal with different dimension names.\n- add name attributes to gridfile$xyCoords$x,y, when writeNcdf, the dim names will be taken from that attribute, which can be exactly the same with the original. 
\n- bug fixed for nc files without members.\n\nNOTE:\n====\n- for hyfo$Data part, when load and write using ncdf4, there will be very little differences compared to the original, which cannot be addressed. If you first load an ncdf file, then write it, then load it again. The data part may have very little difference, less than 10E-5.\n\n\n\nhyfo 1.3.5\n==========\nDate: 2015-12-6\n\n- travis check passed, change the rgdal version from 0.9-3 back to 0.8-16 due to the lack of packages on travis ubuntu.\n- changed .yml file to fix the problem with No repository set, so cyclic dependency check skipped.\n- on CRAN\n- fully supported for windows, Linux and OS.\n\n\n\nhyfo 1.3.3\n==========\nDate: 2015-11-27\n\n- Delete readData_folder, since it's only windows based, add information to get special version for windows users.\n- travis test added.\n\n\nhyfo 1.3.2\n==========\nDate: 2015-11-7\n\n- bug fixed about getPreciBar, signature('data.frame')\n- vignettes updated about bug and hided the warning information.\n- Add how to debug in the documentation for the generic functions.\n\n\n\nhyfo 1.3.1\n==========\nDate: 2015-11-3\n\n- new generic function biasCorrect, extractPeriod, resample, getAnnual, getPreciBar added. 
No need to designate input type any more, R will detect automatically.\n- coordinates conversion function extracted.\n- new user manual about real time bias correction and resample added.\n\n\n\nhyfo 1.2.9\n==========\nDate: 2015-10-30\n\n- new biasFactor S4 class added, to avoid set the input type every time.\n- operational bias correction has been changed to generic function.\n- news file added.\n\n\n\nhyfo 1.2.8\n==========\nDate: 2015.10.10\n\n- operational bias correction added, in normal function.", + "created" : 1483876849677.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "676496319", + "id" : "390DEBE1", + "lastKnownWriteTime" : 1487522938, + "last_content_update" : 1487522938948, + "path" : "~/GitHub/hyfo/NEWS", + "project_path" : "NEWS", + "properties" : { + }, + "relative_order" : 9, + "source_on_save" : false, + "source_window" : "", + "type" : "text" +} \ No newline at end of file diff --git a/.Rproj.user/D53FD3E6/sdb/per/t/B5E420D2 b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/47CB7F65 similarity index 91% rename from .Rproj.user/D53FD3E6/sdb/per/t/B5E420D2 rename to .Rproj.user/D1D10CF6/sdb/s-DA33EA29/47CB7F65 index e0ae531..47bcc25 100644 --- a/.Rproj.user/D53FD3E6/sdb/per/t/B5E420D2 +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/47CB7F65 @@ -1,17 +1,20 @@ { + "collab_server" : "", "contents" : "chooseDim <- function(array, dim, value, drop = FALSE) { \n # Create list representing arguments supplied to [\n # bquote() creates an object corresponding to a missing argument\n dimnames <- attributes(array)$dimensions\n \n indices <- rep(list(bquote()), length(dim(array)))\n indices[[dim]] <- value\n \n if (dim(array)[dim] < max(value)) {\n stop('Chosen member exceeds the member range of the dataset.')\n }\n \n # Generate the call to [\n call <- as.call(c(\n list(as.name(\"[\"), quote(array)),\n indices,\n list(drop = drop)))\n # Print it, just to make it easier to see what's going on\n # Print(call)\n \n # Finally, evaluate it\n output <- 
eval(call)\n \n if (length(dim(output)) == length(dimnames)) {\n attributes(output)$dimensions <- dimnames\n } else if (length(dim(output)) < length(dimnames)){\n \n # In this case, one dimension is dropped, if value is a number \n # and drop == T, this situation can appear. So the dropped dimemsion\n # should be the chosen dimension.\n i <- 1:length(dimnames)\n # get rid of the dropped dimensin\n i <- i[-dim]\n attributes(output)$dimensions <- dimnames[i]\n }\n \n return(output)\n}\n\n\nadjustDim <- function(data, ref = 'no') {\n # input data is an array\n # ref is the Data part of a hyfo file, used as reference\n # Further may be arranged into a seperate function\n # the input reference will be put at first, then the rest \n if (is.null(data)) return(NULL)\n if (identical(ref, 'no')) {\n # Default\n refOrder <- c('lon', 'lat', 'time')\n } else if (is.character(ref)) {\n refOrder <- ref\n } else {\n # Get dimension from input\n refOrder <- attributes(ref)$dimensions\n }\n \n att <- attributes(data)$dimensions\n if (is.null(att)) stop('No dimnames in the input data attributes, please use loadNcdf to load data.')\n if (identical(att, refOrder)) return(data)\n \n dimIndex <- seq(1, length(att))\n dimIndex1 <- grepAndMatch(refOrder, att)# match can apply to simple cases\n \n \n # for array this works, or setdiff can be used here to find the nomatch element.\n dimIndex2 <- dimIndex[-dimIndex1]# choose nomatch\n \n \n data <- aperm(data, c(dimIndex1, dimIndex2))\n attributes(data)$dimensions <- att[c(dimIndex1, dimIndex2)]\n \n return(data)\n}\n\n# Belong to checkDimLength\ncalcuDim <- function(data, dim) {\n dimIndex <- grepAndMatch(dim, attributes(data)$dimensions)\n dimLength <- dim(data)[dimIndex]\n return(dimLength)\n}\n", - "created" : 1449949349073.000, + "created" : 1487525892454.000, "dirty" : false, "encoding" : "ASCII", "folds" : "", "hash" : "2303557273", - "id" : "B5E420D2", - "lastKnownWriteTime" : 1449960012, - "path" : "E:/1/R/hyfo/R/array_dimension.R", 
+ "id" : "47CB7F65", + "lastKnownWriteTime" : 1483875653, + "last_content_update" : 1483875653, + "path" : "~/GitHub/hyfo/R/array_dimension.R", "project_path" : "R/array_dimension.R", "properties" : { }, - "relative_order" : 8, + "relative_order" : 19, "source_on_save" : false, + "source_window" : "", "type" : "r_source" } \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/4821267A b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/4821267A new file mode 100644 index 0000000..67898d5 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/4821267A @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "% Generated by roxygen2: do not edit by hand\n% Please edit documentation in R/fillGap.R\n\\name{fillGap}\n\\alias{fillGap}\n\\title{Fill gaps in the rainfall time series.}\n\\usage{\nfillGap(dataset, corPeriod = \"daily\")\n}\n\\arguments{\n\\item{dataset}{A dataframe with first column the time, the rest columns are rainfall data of different gauges}\n\n\\item{corPeriod}{A string showing the period used in the correlation computing, \ne.g. daily, monthly, yearly.}\n}\n\\value{\nThe filled dataframe\n}\n\\description{\nFill gaps in the rainfall time series.\n}\n\\details{\nthe gap filler follows the rules below:\n\n 1. The correlation coefficient of every two columns (except time column) is calculated.\nthe correlation coefficient calculation can be based on 'daily', 'monthly', 'annual',\nin each case, the daily data, the monthly mean daily data and annual mean daily data of \neach column will be taken in the correlation calculation.\n\nThen the correlation matrix is got, then based on the matrix, for each column, \nthe 1st, 2nd, 3rd,... correlated column will be got. So if there is missing value in the\ncolumn, it will get data from orderly 1st, 2nd, 3rd column.\n\n 2. The simple linear regress is calculated between every two columns. When generating the\n linear coefficient, the incept should be force to 0. i.e. 
y = a*x + b should be forec to \n y = a*x.\n \n 3. Gap filling. E.g., on a certain date, there is a missing value in column A, then the\n correlation order is column B, column C, column D, which means A should take values from\n B firstly, if B is also missing data, then C, then D.\n \n Assuming finally value from column C is taken. Then according to step 2, A = a*C, then the\n final value filled in column A is missing_in_A = a*value_in_C, a is the linear coeffcient.\n}\n\\examples{\nb <- read.table(text = ' Date AAA BBB CCC DDD EEE\n49 1999-12-15 24.8 21.4 25.6 35.0 17.4\n50 1999-12-16 NA 0.6 1.5 6.3 2.5\n51 1999-12-17 NA 16.3 20.3 NA 19.2\n52 1999-12-18 13 1.6 NA 6.3 0.0\n53 1999-12-19 10 36.4 12.5 26.8 24.9\n54 1999-12-20 NA 0.0 0.0 0.2 0.0\n55 1999-12-21 0.2 0.0 0.0 0.0 0.0\n56 1999-12-22 0.0 0.0 0.0 0.0 0.0')\n\nb1 <- fillGap(b) # if corPeriod is missing, 'daily' is taken as default.\n\ndata(testdl)\na <- extractPeriod(testdl, commonPeriod = TRUE)\na1 <- list2Dataframe(a)\na2 <- fillGap(a1)\na3 <- fillGap(a1, corPeriod = 'monthly')\n\n\n# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n\n}\n\\references{\nGap fiiling method based on correlation and linear regression.\n\n\\itemize{\n\\item Hirsch, Robert M., et al. \"Statistical analysis of hydrologic data.\" Handbook of hydrology. (1992): 17-1.\nSalas, Jose D. 
\"Analysis and modeling of hydrologic time series.\" Handbook of hydrology 19 (1993): 1-72.\n\n}\n}\n\n", + "created" : 1487956216135.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "4132485011", + "id" : "4821267A", + "lastKnownWriteTime" : 1488018205, + "last_content_update" : 1488018205, + "path" : "~/GitHub/hyfo/man/fillGap.Rd", + "project_path" : "man/fillGap.Rd", + "properties" : { + }, + "relative_order" : 28, + "source_on_save" : false, + "source_window" : "", + "type" : "r_doc" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/4CAD519F b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/4CAD519F new file mode 100644 index 0000000..989002f --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/4CAD519F @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "% Generated by roxygen2: do not edit by hand\n% Please edit documentation in R/analyzeTS.R\n\\name{getLMom}\n\\alias{getLMom}\n\\title{get L moment analysis of the input distribution}\n\\usage{\ngetLMom(dis)\n}\n\\arguments{\n\\item{dis}{A distribution, for hydrology usually a time series with only data column without time.}\n}\n\\value{\nThe mean, L-variation, L-skewness and L-kurtosis of the input distribution\n}\n\\description{\nget L moment analysis of the input distribution\n}\n\\examples{\ndis <- seq(1, 100)\ngetLMom(dis)\n\n# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n\n}\n\\references{\n\\itemize{\n\\item J. R. M. Hosking (2015). L-moments. R package, version 2.5. 
URL:\nhttps://CRAN.R-project.org/package=lmom.\n}\n}\n\n", + "created" : 1487956293261.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "1948723556", + "id" : "4CAD519F", + "lastKnownWriteTime" : 1488018205, + "last_content_update" : 1488018205, + "path" : "~/GitHub/hyfo/man/getLMom.Rd", + "project_path" : "man/getLMom.Rd", + "properties" : { + }, + "relative_order" : 34, + "source_on_save" : false, + "source_window" : "", + "type" : "r_doc" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/50AB644E b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/50AB644E new file mode 100644 index 0000000..55298c6 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/50AB644E @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "% Generated by roxygen2: do not edit by hand\n% Please edit documentation in R/getSpatialMap.R\n\\name{getSpatialMap_comb}\n\\alias{getSpatialMap_comb}\n\\title{Combine maps together}\n\\usage{\ngetSpatialMap_comb(..., list = NULL, nrow = 1, x = \"\", y = \"\",\n title = \"\", output = FALSE)\n}\n\\arguments{\n\\item{...}{different maps generated by \\code{getSpatialMap(, output = 'ggplot')}, see details.}\n\n\\item{list}{If input is a list containing different ggplot data, use \\code{list = inputlist}.}\n\n\\item{nrow}{A number showing the number of rows.}\n\n\\item{x}{A string of x axis name.}\n\n\\item{y}{A string of y axis name.}\n\n\\item{title}{A string of the title.}\n\n\\item{output}{A boolean, if chosen TRUE, the output will be given.}\n}\n\\value{\nA combined map.\n}\n\\description{\nCombine maps together\n}\n\\details{\nFor \\code{getSpatialMap_comb}, the maps to be compared should be with same size and resolution, \nin other words, they should be fully overlapped by each other.\n\nIf they have different resolutions, use \\code{interpGridData{ecomsUDG.Raccess}} to interpolate.\n}\n\\examples{\n\n\n\\dontrun{\ndata(tgridData)# the result of \\\\code{\\\\link{loadNcdf}}\n#The output should be 
'ggplot'\na1 <- getSpatialMap(tgridData, method = 'summer', output = 'ggplot', name = 'a1')\na2 <- getSpatialMap(tgridData, method = 'winter', output = 'ggplot', name = 'a2')\na3 <- getSpatialMap(tgridData, method = 'mean', output = 'ggplot', name = 'a3')\na4 <- getSpatialMap(tgridData, method = 'max', output = 'ggplot', name = 'a4')\ngetSpatialMap_comb(a1, a2)\n\n# or you can put them into a list.\ngetSpatialMap_comb(list = list(a1, a2), nrow = 2)\n}\n\n\n\n# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n\n}\n\\references{\n\\itemize{\n\\item H. Wickham. ggplot2: elegant graphics for data analysis. Springer New York, 2009.\n}\n}\n\n", + "created" : 1487956380629.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "1689511535", + "id" : "50AB644E", + "lastKnownWriteTime" : 1488018205, + "last_content_update" : 1488018205, + "path" : "~/GitHub/hyfo/man/getSpatialMap_comb.Rd", + "project_path" : "man/getSpatialMap_comb.Rd", + "properties" : { + }, + "relative_order" : 40, + "source_on_save" : false, + "source_window" : "", + "type" : "r_doc" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/52476E6A b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/52476E6A new file mode 100644 index 0000000..8ab2859 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/52476E6A @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "\nreadData_folder <- function(folderName, keyword) {\n \n folderName <- paste(folderName, keyword, sep = '\\\\')\n \n fileNames <- list.files(folderName, pattern = '*.csv', full.names = TRUE)\n \n if (length(fileNames)==0) {\n fileNames <- list.files(folderName, pattern = '.TXT', full.names = TRUE)\n if (length(fileNames)==0) stop('Wrong keyword, initial has to be Upper-case')\n \n data <- collectData_txt_anarbe(folderName, rangeWord = c('D?a ', -1, 'M?x. 
', -5))\n rownames(data) <- NULL\n } else {\n data <- collectData_csv_anarbe(folderName)\n }\n \n return(data)\n}\n\n\n# @importFrom utils choose.dir\n#' @references \n#' \n#' \\itemize{\n#' \\item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for\n#' Statistical Computing, Vienna, Austria. URL https://www.R-project.org/.\n#' }\n#' \n\nreadData <- function(keyword, folderName) {\n message('This function is only windows based, if you are using windows platform (real\n operational system, not virtual machine), and want to use this function, please\n contact the author (xuyuanchao37@gmail.com) for the windows version.')\n# message('Choose the main folder that, in it, there are different folders representing different gauging stations,\n# all the gauging stations have precipitation data, some of them also have discharge data,\n# this function is to open different gauging folders and read the data, arragen them together.')\n# message('\\n\\n\n# new file is a list based file and needs to be read by dget()')\n \n# fileNames <- list.files(folderName, full.names = TRUE)\n# data <- lapply(fileNames, FUN = readData_folder, keyword = keyword)\n# \n# names <- sapply(c(1:length(data)), function(x) colnames(data[[x]])[2])\n# names(data) <- names\n# \n# fileName <- file.choose(new = TRUE)\n# dput(data, file = fileName)\n# \n# return(data)\n}\n", + "created" : 1488018081708.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "392692864", + "id" : "52476E6A", + "lastKnownWriteTime" : 1488018115, + "last_content_update" : 1488018115819, + "path" : "~/GitHub/hyfo/R/readfolders.R", + "project_path" : "R/readfolders.R", + "properties" : { + }, + "relative_order" : 52, + "source_on_save" : false, + "source_window" : "", + "type" : "r_source" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/61DE20F2 b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/61DE20F2 new file mode 100644 index 0000000..b40f0ad --- 
/dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/61DE20F2 @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "% Generated by roxygen2: do not edit by hand\n% Please edit documentation in R/getSpatialMap.R\n\\name{getSpatialMap}\n\\alias{getSpatialMap}\n\\title{Get spatial map of the input dataset.}\n\\usage{\ngetSpatialMap(dataset, method = NULL, member = \"mean\", ...)\n}\n\\arguments{\n\\item{dataset}{A list containing different information, should be the result of reading netcdf file using\n\\code{loadNcdf}.}\n\n\\item{method}{A string showing different calculating method for the map. More information please refer to\ndetails.}\n\n\\item{member}{A number showing which member is selected to get, if the dataset has a \"member\" dimension. Default\nis NULL, if no member assigned, and there is a \"member\" in dimensions, the mean value of the members will be\ntaken.}\n\n\\item{...}{several arguments including x, y, title, catchment, point, output, name, info, scale, color, \ntype in \\code{?getSpatialMap_mat} for details.}\n}\n\\value{\nA matrix representing the raster map is returned, and the map is plotted.\n}\n\\description{\nGet spatial map of the input dataset.\n}\n\\details{\nThere are following methods to be selected, \n\"meanAnnual\": annual rainfall of each year is plotted. \n\"winter\", \"spring\", \"autumn\", \"summer\": MEAN seasonal rainfall of each year is plotted.\nMonth(number 1 to 12): MEAN month rainfall of each year is plotted, e.g. 
MEAN march rainfall of each year.\n\"mean\", \"max\", \"min\": mean daily, maximum daily, minimum daily precipitation.\n}\n\\examples{\n\n\n\\dontrun{\n#gridData provided in the package is the result of \\\\code {loadNcdf}\ndata(tgridData)\ngetSpatialMap(tgridData, method = 'meanAnnual')\ngetSpatialMap(tgridData, method = 'winter')\n\n\ngetSpatialMap(tgridData, method = 'winter', catchment = testCat)\n\nfile <- system.file(\"extdata\", \"point.txt\", package = \"hyfo\")\npoint <- read.table(file, header = TRUE, sep = ',' )\ngetSpatialMap(tgridData, method = 'winter', catchment = testCat, point = point)\n}\n\n\n# More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/\n\n}\n\n", + "created" : 1487956375440.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "3319896544", + "id" : "61DE20F2", + "lastKnownWriteTime" : 1488018205, + "last_content_update" : 1488018205, + "path" : "~/GitHub/hyfo/man/getSpatialMap.Rd", + "project_path" : "man/getSpatialMap.Rd", + "properties" : { + }, + "relative_order" : 39, + "source_on_save" : false, + "source_window" : "", + "type" : "r_doc" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/6511719A b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/6511719A new file mode 100644 index 0000000..7e9b50d --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/6511719A @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "#' Fill gaps in the rainfall time series.\n#'\n#' @param dataset A dataframe with first column the time, the rest columns are rainfall data of different gauges\n#' @param corPeriod A string showing the period used in the correlation computing, \n#' e.g. daily, monthly, yearly.\n#' @return The filled dataframe\n#' @details\n#' the gap filler follows the rules below:\n#' \n#' 1. 
The correlation coefficient of every two columns (except time column) is calculated.\n#' the correlation coefficient calculation can be based on 'daily', 'monthly', 'annual',\n#' in each case, the daily data, the monthly mean daily data and annual mean daily data of \n#' each column will be taken in the correlation calculation.\n#' \n#' Then the correlation matrix is got, then based on the matrix, for each column, \n#' the 1st, 2nd, 3rd,... correlated column will be got. So if there is missing value in the\n#' column, it will get data from orderly 1st, 2nd, 3rd column.\n#' \n#' 2. The simple linear regress is calculated between every two columns. When generating the\n#' linear coefficient, the incept should be force to 0. i.e. y = a*x + b should be forec to \n#' y = a*x.\n#' \n#' 3. Gap filling. E.g., on a certain date, there is a missing value in column A, then the\n#' correlation order is column B, column C, column D, which means A should take values from\n#' B firstly, if B is also missing data, then C, then D.\n#' \n#' Assuming finally value from column C is taken. 
Then according to step 2, A = a*C, then the\n#' final value filled in column A is missing_in_A = a*value_in_C, a is the linear coeffcient.\n#' \n#' @examples\n#' b <- read.table(text = ' Date AAA BBB CCC DDD EEE\n#' 49 1999-12-15 24.8 21.4 25.6 35.0 17.4\n#' 50 1999-12-16 NA 0.6 1.5 6.3 2.5\n#' 51 1999-12-17 NA 16.3 20.3 NA 19.2\n#' 52 1999-12-18 13 1.6 NA 6.3 0.0\n#' 53 1999-12-19 10 36.4 12.5 26.8 24.9\n#' 54 1999-12-20 NA 0.0 0.0 0.2 0.0\n#' 55 1999-12-21 0.2 0.0 0.0 0.0 0.0\n#' 56 1999-12-22 0.0 0.0 0.0 0.0 0.0')\n#' \n#' b1 <- fillGap(b) # if corPeriod is missing, 'daily' is taken as default.\n#' \n#' data(testdl)\n#' a <- extractPeriod(testdl, commonPeriod = TRUE)\n#' a1 <- list2Dataframe(a)\n#' a2 <- fillGap(a1)\n#' a3 <- fillGap(a1, corPeriod = 'monthly')\n#' \n#' \n#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n#' \n#' @references\n#' Gap fiiling method based on correlation and linear regression.\n#' \n#' \\itemize{\n#' \\item Hirsch, Robert M., et al. \"Statistical analysis of hydrologic data.\" Handbook of hydrology. (1992): 17-1.\n#' Salas, Jose D. 
\"Analysis and modeling of hydrologic time series.\" Handbook of hydrology 19 (1993): 1-72.\n#' \n#' }\n#' \n#' \n#' @export\nfillGap <- function(dataset, corPeriod = 'daily') {\n \n if (!grepl('-|/', dataset[1, 1])) {\n stop('First column is not date or Wrong Date formate, check the format in ?as.Date{base} \n and use as.Date to convert.')\n }\n Date <- as.Date(dataset[, 1])\n data <- data.frame(dataset[, 2:dim(dataset)[2]])\n names <- colnames(data)\n \n corN <- fillGap_cor(data, corPeriod = corPeriod, Date = Date)\n cat('\\nCorrelation Coefficient\\n')\n print(corN)\n \n corOrder <- apply(corN, MARGIN = 1, FUN = function(x) order(-x))\n corOrder <- corOrder[2:dim(corOrder)[1], ]\n corOrderName <- t(apply(corOrder, MARGIN = 2, FUN = function(x) names[x]))\n \n cat('\\nCorrelation Order\\n')\n colnames(corOrderName) <- seq(1 : dim(corOrderName)[2])\n print(corOrderName)\n \n lmCoef <- fillGap_lmCoef(data, corOrder)\n cat('\\nLinear Coefficients\\n')\n rownames(lmCoef) <- seq(1 : dim(corOrderName)[2])\n print(t(lmCoef))\n \n output <- lapply(1:dim(data)[2], fillGap_column, data = data,\n corOrder = corOrder, lmCoef = lmCoef)\n output <- data.frame(output)\n colnames(output) <- names\n \n output <- cbind(Date, output)\n \n return(output)\n}\n\n\n#' Get monthly rainfall\n#' \n#' @param TS A rainfall time series.\n#' @param year A list showing the year index of the time series.\n#' @param mon A list showing the mon index of the time series.\n#' @return the monthly rainfall matrix of the rainfall time series.\nmonthlyPreci <- function(TS, year, mon) {\n \n # monthly daily mean is used in order not to affected by missing values.\n monTS <- tapply(TS, INDEX = list(year, mon), FUN = mean, na.rm = TRUE)\n output <- t(monTS)\n dim(output) <- c(dim(monTS)[1] * dim(monTS)[2], 1)\n return(output)\n}\n\n\nfillGap_column <- function(i, data, corOrder, lmCoef) {\n TS <- data[, i] # extract target column\n l <- dim(data)[2] # length\n \n for (j in 1:l) {\n if (!any(is.na(TS))) 
break\n NAindex <- which(is.na(TS))\n TS[NAindex] <- round(lmCoef[j, i] * data[NAindex, corOrder[j, i]], 3)\n \n if (j == l) stop('Error: One time consists of all NA values')\n }\n \n return(TS)\n}\n\n\n#' @importFrom stats cor na.omit\n#' @references \n#' \n#' \\itemize{\n#' \\item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for\n#' Statistical Computing, Vienna, Austria. URL https://www.R-project.org/.\n#' }\n#' \n#' \n\nfillGap_cor <- function(data, corPeriod = 'daily', Date) {\n \n names <- colnames(data)\n year <- format(Date, '%Y')\n \n if (corPeriod == 'monthly') {\n #based on monthly rainfall\n mon <- format(Date, '%m')\n monthlyPreci <- lapply(data, FUN = monthlyPreci, year = year, mon = mon)\n corData <- do.call('cbind', monthlyPreci)\n } else if (corPeriod == 'yearly') {\n year <- format(Date, '%Y')\n # yearly daily mean is used in order not to affected by missing values.\n annualPreci <- lapply(data, FUN = function(x) tapply(x, INDEX = year, FUN = mean, na.rm = TRUE))\n corData <- do.call('cbind', annualPreci)\n } else if (corPeriod == 'daily') {\n corData <- data\n } else {\n stop('Pleas choose among \"daily\", \"monthly\", \"yearly\".')\n }\n \n corData <- data.frame(na.omit(corData))\n colnames(corData) <- names\n \n corN <- cor(corData)\n \n return(corN)\n \n} \n\n#' @importFrom utils combn\n#' @importFrom stats coef lm\n#' @references \n#' R Core Team (2015). R: A language and environment for statistical computing. R Foundation for\n#' Statistical Computing, Vienna, Austria. 
URL https://www.R-project.org/.\n#' \nfillGap_lmCoef <- function(data, corOrder) {\n l <- dim(data)[2]\n m <- diag(l)# m is the coeficients matrix\n m[lower.tri(m)] <- combn(data, 2, function(x) coef(lm(x[, 2] ~ x[, 1] + 0)))\n tm <- t(m)\n \n tm[lower.tri(tm)] <- combn(data, 2, function(x) coef(lm(x[, 1] ~ x[, 2] + 0)))\n \n m <- t(tm)\n \n lmCoef <- lapply(1 : l, function(x) m[x,corOrder[, x]])\n lmCoef <- do.call('rbind', lmCoef)\n rownames(lmCoef) <- colnames(data)\n \n return(t(lmCoef))\n}\n\n", + "created" : 1487522438368.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "2101638457", + "id" : "6511719A", + "lastKnownWriteTime" : 1488015924, + "last_content_update" : 1488015924965, + "path" : "~/GitHub/hyfo/R/fillGap.R", + "project_path" : "R/fillGap.R", + "properties" : { + }, + "relative_order" : 16, + "source_on_save" : false, + "source_window" : "", + "type" : "r_source" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/666D46C7 b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/666D46C7 new file mode 100644 index 0000000..c50e81f --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/666D46C7 @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "% Generated by roxygen2: do not edit by hand\n% Please edit documentation in R/getPreciBar(generic).R\n\\name{getPreciBar_comb}\n\\alias{getPreciBar_comb}\n\\title{Combine bars together}\n\\usage{\ngetPreciBar_comb(..., list = NULL, nrow = 1, x = \"\", y = \"\",\n title = \"\", output = FALSE)\n}\n\\arguments{\n\\item{...}{different barplots generated by \\code{getPreciBar(, output = 'ggplot')}, refer to details.}\n\n\\item{list}{If input is a list containing different ggplot data, use l\\code{list = inputlist}.\nNOTE: yOU HAVE TO PUT A \\code{list = }, before your list.}\n\n\\item{nrow}{A number showing the number of rows.}\n\n\\item{x}{A string of x axis name.}\n\n\\item{y}{A string of y axis name.}\n\n\\item{title}{A string of the title.}\n\n\\item{output}{A 
boolean, if chosen TRUE, the output will be given.}\n}\n\\value{\nA combined barplot.\n}\n\\description{\nCombine bars together\n}\n\\details{\n..., representing different ouput generated by \\code{getPreciBar(, output = 'ggplot')}, they \nhave to be of the same type, e.g., \n1. Jan precipitation of different years, Feb precipitation of different years, and... \nThey are both monthly precipitation, and they share x axis.\n\n2. Mean monthly precipitation of different dataset. e.g., long term mean monthly precipitation\nand short term mean monthly precipitation. They are both mean monthly precipitation.\n}\n\\examples{\n\ndata(tgridData)# the result of \\\\code{\\\\link{loadNcdf}}\n#output type of getPreciBar() has to be 'ggplot'.\nb1 <- getPreciBar(tgridData, method = 2, output = 'ggplot', name = 'b1')\nb2 <- getPreciBar(tgridData, method = 3, output = 'ggplot', name = 'b2')\n\ngetPreciBar_comb(b1, b2)\n\n# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n\n}\n\\references{\n\\itemize{\n\\item H. Wickham. ggplot2: elegant graphics for data analysis. 
Springer New York, 2009.\n}\n}\n\n", + "created" : 1487956369629.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "360658630", + "id" : "666D46C7", + "lastKnownWriteTime" : 1488018205, + "last_content_update" : 1488018205, + "path" : "~/GitHub/hyfo/man/getPreciBar_comb.Rd", + "project_path" : "man/getPreciBar_comb.Rd", + "properties" : { + }, + "relative_order" : 38, + "source_on_save" : false, + "source_window" : "", + "type" : "r_doc" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/6DDA2A7B b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/6DDA2A7B new file mode 100644 index 0000000..475bdd1 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/6DDA2A7B @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "#' get mean rainfall bar plot of the input dataset or time series.\n#' \n#' get mean rainfall bar plot of the input dataset or time series.\n#' \n#' \n#' @param data A list containing different information, should be the result of reading netcdf file using\n#' \\code{\\link{loadNcdf}}, or a time series, with first column the Date, second the value.\n#' Time series can be an ENSEMBLE containning different members. Than the mean value will be given and the range will be given.\n#' @param method A string showing the calculating method of the input time series. More information\n#' please refer to the details.\n#' @param cell A vector containing the locaton of the cell, e.g. c(2, 3), default is \"mean\", representing\n#' the spatially averaged value. 
Check details for more information.\n#' @param output A string showing the type of the output, if \\code{output = 'ggplot'}, the returned \n#' data can be used in ggplot and \\code{getPreciBar_comb()}; if \\code{output = 'plot'}, the returned data is the plot containing all \n#' layers' information, and can be plot directly or used in grid.arrange; if not set, the data\n#' will be returned.\n#' @param name If \\code{output = 'ggplot'}, name has to be assigned to your output, in order to differentiate\n#' different outputs in the later multiplot using \\code{getSpatialMap_comb}.\n#' @param plotRange A boolean showing whether the range will be plotted.\n#' @param member A number showing which member is selected to get, if the dataset has a \"member\" dimension. Default\n#' is NULL, if no member assigned, and there is a \"member\" in dimensions, the mean value of the members will be\n#' taken.\n#' @param omitNA A boolean showing whether the missing value is omitted.\n#' @param info A boolean showing whether the information of the map, e.g., max, mean ..., default is FALSE.\n#' @param ... \\code{title, x, y} showing the title and x and y axis of the plot. e.g. \\code{title = 'aaa'}\n#' @details\n#' There are following methods to be selected, \n#' \"annual\": annual rainfall of each year is plotted. \n#' \"winter\", \"spring\", \"autumn\", \"summer\": seasonal rainfall of each year is plotted.\n#' Month(number 1 to 12): month rainfall of each year is plotted, e.g. march rainfall of each year.\n#' \"meanMonthly\": the mean monthly rainfall of each month over the whole period.\n#' \n#' #Since \"winter\" is a crossing year, 12, 1, 2, 12 is in former year, and 1, 2 are in latter year.\n#' #so winter belongs to the latter year.\n#' \n#' \n#' \\code{cell} representing the location of the cell, NOTE: this location means the index of the cell,\n#' IT IS NOT THE LONGITUDE AND LATITUDE. 
e.g., \\code{cell = c(2, 3)}, the program will take the 2nd longitude\n#' and 3rd latitude, by the increasing order. Longitude comes first.\n#' \n#' \n#' It is a generic function, if in your case you need to debug, please see \\code{?debug()} \n#' for how to debug S4 method.\n#' \n#' @examples\n#' #gridData provided by package is the result of \\code{loadNcdf()}\n#' data(tgridData)\n#' b1 <- getPreciBar(tgridData, method = 'annual')\n#' b2 <- getPreciBar(tgridData, method = 'meanMonthly')\n#' \n#' data(testdl)\n#' TS <- testdl[[1]]\n#' a <- getPreciBar(TS, method = 'spring')\n#' # if info = T, the information will be given at the bottom.\n#' a <- getPreciBar(TS, method = 'spring', info = TRUE)\n#' \n#' \n#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n#' \n#' @references \n#' \n#' \n#' \\itemize{\n#' \\item Hadley Wickham (2007). Reshaping Data with the reshape Package. Journal of Statistical Software,\n#' 21(12), 1-20. URL http://www.jstatsoft.org/v21/i12/.\n#' \\item H. Wickham. ggplot2: elegant graphics for data analysis. Springer New York, 2009.\n#' \\item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for\n#' Statistical Computing, Vienna, Austria. URL https://www.R-project.org/.\n#' }\n#'\n#' \n#' @return The calculated mean value of the input time series and the plot of the result.\n#' @export\nsetGeneric('getPreciBar', function(data, method, cell = 'mean', output = 'data', name = NULL, \n plotRange = TRUE, member = NULL, omitNA = TRUE, info = FALSE,\n ...) {\n standardGeneric('getPreciBar')\n})\n\n#' @rdname getPreciBar\nsetMethod('getPreciBar', signature('list'), \n function(data, method, cell, output, name, plotRange, member, omitNA, info, ...) 
{\n TS <- getPreciBar.list(data, cell, member)\n # for hyfo file, in order to process the data, year and month index need to be provided.\n startTime <- as.POSIXlt(data$Dates$start, tz = 'GMT')\n yearIndex <- startTime$year + 1900\n monthIndex <- startTime$mon + 1\n \n result <- getPreciBar.plot(TS, method, output, name, plotRange, omitNA, info, yearIndex,\n monthIndex, ...)\n return(result)\n})\n\n#' @rdname getPreciBar\nsetMethod('getPreciBar', signature('data.frame'), \n function(data, method, cell, output, name, plotRange, member, omitNA, info, ...) {\n Date <- as.POSIXlt(TS[, 1])\n yearIndex <- Date$year + 1900\n monthIndex <- Date$mon + 1\n TS <- getPreciBar.TS(data)\n result <- getPreciBar.plot(TS, method, output, name, plotRange, omitNA, info, \n yearIndex, monthIndex, ...)\n return(result)\n})\n\n\ngetPreciBar.list <- function(dataset, cell, member) {\n #check input dataset\n checkHyfo(dataset)\n \n data <- dataset$Data\n \n # Dimension needs to be arranged. Make sure first and second dimension is lat and lon.\n data <- adjustDim(data, ref = c('lon', 'lat', 'time'))\n \n # Because in the following part, only 3 dimensions are allowed, so data has to be processed.\n if (is.null(member) & any(attributes(data)$dimensions == 'member')) {\n dimIndex3 <- which(attributes(data)$dimensions != 'member')\n data <- apply(data, MARGIN = dimIndex3, FUN = mean, na.rm = TRUE)\n } else if (!is.null(member) & any(attributes(data)$dimensions == 'member')) {\n dimIndex3 <- which(attributes(data)$dimensions == 'member')\n data <- chooseDim(data, dimIndex3, member, drop = TRUE)\n } else if (!is.null(member) & !any(attributes(data)$dimensions == 'member')){\n stop('There is no member part in the dataset, but you choose one, check the input\n dataset or change your arguments.')\n }\n \n if (identical(cell, 'mean')) {\n TS <- apply(data, MARGIN = 3, FUN = mean, na.rm = TRUE) \n } else {\n TS <- data[cell[1], cell[2], ]\n }\n \n return(TS)\n}\n\n\n#' @importFrom reshape2 
melt\ngetPreciBar.TS <- function(TS) {\n \n# Date <- as.POSIXlt(TS[, 1])\n# yearIndex <- Date$year + 1900\n# monthIndex <- Date$mon + 1\n n <- ncol(TS) - 1\n \n if ( n == 1) {\n TS <- TS[, 2]\n } else {\n \n TS <- TS[, -1]\n # month index should be repeat, but years cannot.\n# yearIndex <- sapply(1:n, function(x) yearIndex + x - 1)\n# dim(yearIndex) <- c(n * nrow(yearIndex), 1)\n \n# monthIndex <- rep(monthIndex, n)\n TS <- melt(TS)[, 2]\n \n }\n return(TS)\n}\n\n\n#' @importFrom stats median\n#' @importFrom reshape2 melt\n#' @import ggplot2\ngetPreciBar.plot <- function(TS, method, output, name, plotRange, omitNA, info, \n yearIndex = NULL, monthIndex = NULL, ...) {\n \n \n if (method == 'meanMonthly') {\n \n monthlyPreci <- tapply(TS, INDEX = list(yearIndex, monthIndex), FUN = sum, na.rm = omitNA)\n meanMonthlyPreci <- apply(monthlyPreci, MARGIN = 2, FUN = mean, na.rm = TRUE)\n \n \n title <- 'Mean Monthly Precipitation'\n xlab <- 'Month'\n \n plotPreci <- data.frame(Index = month.abb[as.numeric(colnames(monthlyPreci))], \n Preci = meanMonthlyPreci)\n \n # Here factor has to be reassigned, to keep the original order, or it will be reordered.\n plotPreci$Index <- factor(plotPreci$Index, levels = plotPreci$Index, ordered = TRUE)\n \n if (plotRange) {\n maxValue <- apply(monthlyPreci, MARGIN = 2, FUN = max, na.rm = TRUE)\n minValue <- apply(monthlyPreci, MARGIN = 2, FUN = min, na.rm = TRUE)\n \n plotPreci$maxValue <- maxValue\n plotPreci$minValue <- minValue\n \n ylim <- c(0,max(maxValue, na.rm = TRUE) * 1.1)\n \n } else {\n ylim <- c(0,max(meanMonthlyPreci, na.rm = TRUE) * 1.1)\n }\n \n \n } else if (method == 'annual') { \n \n if (length(unique(monthIndex)) < 12) {\n warning ('There are less than 12 months in a year, the results may be inaccurate.')\n }\n \n annualPreci <- tapply(TS, INDEX = yearIndex, FUN = sum, na.rm = TRUE)\n title <- 'Annual Precipitation'\n xlab <- 'Year'\n plotName <- names(annualPreci)\n \n plotPreci <- data.frame(Index = 
names(annualPreci), Preci = annualPreci)\n plotPreci$Index <- factor(plotPreci$Index, levels = plotPreci$Index, ordered = TRUE)\n \n ylim <- c(0, max(annualPreci, na.rm = TRUE) * 1.1)\n \n } else if (is.numeric(method)) {\n month <- method\n monExisting <- length(which(unique(monthIndex) == month))\n if (monExisting == 0) stop(\"Your input month doesn't exist in the dataset.\")\n \n monthlyPreci <- getMeanPreci(TS, method = month, yearIndex = yearIndex,\n monthIndex = monthIndex, fullResults = TRUE, omitNA = omitNA)\n # If monthlyPreci length is 1, names need to be added.\n if (length(monthlyPreci) == 1) names(monthlyPreci) <- unique(yearIndex)\n plotPreci <- data.frame(Index = names(monthlyPreci), Preci = monthlyPreci)\n plotPreci$Index <- factor(plotPreci$Index, levels = plotPreci$Index, ordered = TRUE)\n \n title <- paste(month.abb[month], 'Precipitation over Whole Period', sep = ' ')\n xlab <- 'Year'\n ylim <- c(0, max(monthlyPreci, na.rm = TRUE) * 1.1)\n \n } else if (method == 'spring') { \n \n wm <- match(c(3, 4, 5), unique(monthIndex))\n if (length(which(!is.na(wm))) < 3) {\n stop('Spring has less than 3 months, check data and try to calculate every month\n seperately or choose another season.')\n }\n \n seasonalPreci <- getMeanPreci(TS, method = 'spring', yearIndex = yearIndex,\n monthIndex = monthIndex, fullResults = TRUE, omitNA = omitNA)\n plotPreci <- data.frame(Index = names(seasonalPreci), Preci = seasonalPreci)\n plotPreci$Index <- factor(plotPreci$Index, levels = plotPreci$Index, ordered = TRUE)\n \n title <- paste('Spring', 'Precipitation over Whole Period', sep = ' ')\n xlab <- 'Year'\n ylim <- c(0, max(seasonalPreci, na.rm = TRUE) * 1.1)\n \n \n } else if (method == 'summer') {\n \n wm <- match(c(6, 7, 8), unique(monthIndex))\n if (length(which(!is.na(wm))) < 3) {\n stop('Summer has less than 3 months, check data and try to calculate every month\n seperately or choose another season.')\n }\n \n seasonalPreci <- getMeanPreci(TS, method = 
'summer', yearIndex = yearIndex,\n monthIndex = monthIndex, fullResults = TRUE, omitNA = omitNA)\n plotPreci <- data.frame(Index = names(seasonalPreci), Preci = seasonalPreci)\n plotPreci$Index <- factor(plotPreci$Index, levels = plotPreci$Index, ordered = TRUE)\n \n title <- paste('Summer', 'Precipitation over Whole Period', sep = ' ')\n xlab <- 'Year'\n ylim <- c(0, max(seasonalPreci, na.rm = TRUE) * 1.1)\n \n \n } else if (method == 'autumn') {\n wm <- match(c(9, 10, 11), unique(monthIndex))\n if (length(which(!is.na(wm))) < 3) {\n stop('Autumn has less than 3 months, check data and try to calculate every month\n seperately or choose another season.')\n }\n \n seasonalPreci <- getMeanPreci(TS, method = 'autumn', yearIndex = yearIndex,\n monthIndex = monthIndex, fullResults = TRUE, omitNA = omitNA)\n plotPreci <- data.frame(Index = names(seasonalPreci), Preci = seasonalPreci)\n plotPreci$Index <- factor(plotPreci$Index, levels = plotPreci$Index, ordered = TRUE)\n \n title <- paste('Autumn', 'Precipitation over Whole Period', sep = ' ')\n xlab <- 'Year'\n ylim <- c(0, max(seasonalPreci, na.rm = TRUE) * 1.1)\n \n } else if (method == 'winter') {\n wm <- match(c(12, 1, 2), unique(monthIndex))\n if (length(which(!is.na(wm))) < 3) {\n stop('Winter has less than 3 months, check data and try to calculate every month\n seperately or choose another season.')\n }\n \n seasonalPreci <- getMeanPreci(TS, method = 'winter', yearIndex = yearIndex,\n monthIndex = monthIndex, fullResults = TRUE, omitNA = omitNA)\n plotPreci <- data.frame(Index = names(seasonalPreci), Preci = seasonalPreci)\n plotPreci$Index <- factor(plotPreci$Index, levels = plotPreci$Index, ordered = TRUE)\n \n title <- paste('Winter', 'Precipitation over Whole Period', sep = ' ')\n xlab <- 'Year'\n ylim <- c(0, max(seasonalPreci, na.rm = TRUE) * 1.1)\n \n } else {\n stop(paste('No method called \"', method, '\", check help for information'))\n }\n \n \n xlim <- c(0, length(rownames(plotPreci))) \n \n if (info 
== TRUE) {\n meanValue <- round(mean(plotPreci$Preci, na.rm = TRUE), 2)\n medianValue <- round(median(plotPreci$Preci,na.rm = TRUE), 2)\n plotMean <- paste('Mean', ' = ', meanValue)\n plotMedian <- paste('Median', ' = ', medianValue)\n \n plotMax <- round(max(plotPreci$Preci, na.rm = TRUE), 2)\n plotMin <- round(min(plotPreci$Preci, na.rm = TRUE), 2)\n word <- paste('\\n\\n', paste(' Max', '=', plotMax), ',', paste('Min', '=', plotMin), ',',\n plotMean, ',', plotMedian)\n } else word <- NULL\n \n \n xlab <- paste(xlab, word)\n \n theme_set(theme_bw())\n \n mainLayer <- with(plotPreci, {\n ggplot(plotPreci) +\n geom_bar(aes(x = Index, y = Preci), stat = 'identity', colour = 'black', fill = 'cyan2', width = rel(.4)) +\n xlab(xlab) +\n ylab('Precipitation (mm)') +\n ggtitle(title) +\n labs(empty = NULL, ...) +#in order to pass \"...\", arguments shouldn't be empty.\n theme(plot.title = element_text(size = rel(1.6), face = 'bold'),\n axis.title.x = element_text(size = rel(1.6)),\n axis.title.y = element_text(size = rel(1.6)),\n axis.text.x = element_text(angle = 90, hjust = 1, size = rel(1.9)),\n axis.text.y = element_text(size = rel(1.9)))\n # geom_text(x = min(xlim) + 0.95 * (max(xlim) - min(xlim)), y = min(ylim) + 0.15 * (max(ylim) - min(ylim)),\n # label = word)+\n # geom_hline(yintercept = meanValue) +\n # geom_text(x = min(xlim) + 0.3 * (max(xlim) - min(xlim)), y = meanValue + 3, vjust = 0, label = 'mean') +\n # geom_hline(yintercept = medianValue, colour = 'red') +\n # geom_text(x = min(xlim) + 0.6 * (max(xlim) - min(xlim)), y = medianValue + 3, vjust = 0,\n # label = 'median', colour = 'red')\n })\n \n \n if (plotRange) {\n if (is.null(plotPreci$maxValue)) {\n message('There is no plotRange for this method')\n print(mainLayer)\n } else {\n rangeLayer <- with(plotPreci, {\n geom_errorbar(aes(x = Index, ymax = maxValue, ymin = minValue), width = rel(0.3))\n }) \n print(mainLayer + rangeLayer)\n }\n \n } else {\n print(mainLayer)\n } \n \n if (output == 'plot') 
{\n return(mainLayer)\n } else if (output == 'ggplot') {\n if (is.null(name)) stop('\"name\" argument not found, \n If you choose \"ggplot\" as output, please assign a name.')\n plotPreci$Name <- rep(name, dim(plotPreci)[1])\n return(plotPreci)\n } else {\n return(plotPreci)\n }\n}\n\n\n\n\n\n\n\n#' Combine bars together\n#' @param ... different barplots generated by \\code{getPreciBar(, output = 'ggplot')}, refer to details.\n#' @details\n#' ..., representing different ouput generated by \\code{getPreciBar(, output = 'ggplot')}, they \n#' have to be of the same type, e.g., \n#' 1. Jan precipitation of different years, Feb precipitation of different years, and... \n#' They are both monthly precipitation, and they share x axis.\n#' \n#' 2. Mean monthly precipitation of different dataset. e.g., long term mean monthly precipitation\n#' and short term mean monthly precipitation. They are both mean monthly precipitation.\n#' \n#' @param nrow A number showing the number of rows.\n#' @param list If input is a list containing different ggplot data, use l\\code{list = inputlist}.\n#' NOTE: yOU HAVE TO PUT A \\code{list = }, before your list.\n#' @param x A string of x axis name.\n#' @param y A string of y axis name.\n#' @param title A string of the title.\n#' @param output A boolean, if chosen TRUE, the output will be given.\n#' @return A combined barplot.\n#' @examples\n#' \n#' data(tgridData)# the result of \\code{\\link{loadNcdf}}\n#' #output type of getPreciBar() has to be 'ggplot'.\n#' b1 <- getPreciBar(tgridData, method = 2, output = 'ggplot', name = 'b1')\n#' b2 <- getPreciBar(tgridData, method = 3, output = 'ggplot', name = 'b2')\n#' \n#' getPreciBar_comb(b1, b2)\n#' \n#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n#' \n#' @export\n#' @import ggplot2\n#' @importFrom data.table rbindlist\n#' @references \n#' \n#' \\itemize{\n#' \\item H. Wickham. ggplot2: elegant graphics for data analysis. 
Springer New York, 2009.\n#' }\n#' \ngetPreciBar_comb <- function(..., list = NULL, nrow = 1, x = '', y = '', title = '', output = FALSE) {\n if (!is.null(list)) {\n data_ggplot <- rbindlist(list)\n } else {\n \n bars <- list(...)\n checkBind(bars, 'rbind')\n data_ggplot <- rbindlist(bars)\n }\n \n if (!class(data_ggplot)[1] == 'data.table') {\n warning('Your input is probably a list, but you forget to add \"list = \" before it.\n Try again, or check help for more information.')\n } else if (is.null(data_ggplot$Name)) {\n stop('No \"Name\" column in the input data, check the arguments in getPreciBar(), if \n output = \"ggplot\" is assigned, more info please check ?getPreciBar.')\n }\n \n data_ggplot$Name <- factor(data_ggplot$Name, levels = unique(data_ggplot$Name), ordered = TRUE)\n \n theme_set(theme_bw())\n \n mainLayer <- with(data_ggplot, {\n ggplot(data_ggplot) +\n geom_bar(aes(x = Index, y = Preci),fill = 'cyan2', stat = 'identity', \n colour = 'black', width = rel(.4)) +\n facet_wrap( ~ Name, nrow = nrow) +\n theme(plot.title = element_text(size = rel(1.6), face = 'bold'),\n axis.title.x = element_text(size = rel(1.6)),\n axis.title.y = element_text(size = rel(1.6)),\n axis.text.x = element_text(angle = 90, hjust = 1, size = rel(1.9)),\n axis.text.y = element_text(size = rel(1.9))) +\n labs(x = x, y = y, title = title)\n })\n \n if (!any(is.na(match(c('minValue', 'maxValue'), colnames(data_ggplot))))) {\n rangeLayer <- with(data_ggplot, {\n geom_errorbar(aes(x = Index, ymax = maxValue, ymin = minValue), width = rel(0.3))\n }) \n mainLayer <- mainLayer + rangeLayer\n }\n \n \n suppressWarnings(print(mainLayer))\n \n if (output == TRUE) return(data_ggplot)\n }\n\n", + "created" : 1483876772702.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "399598286", + "id" : "6DDA2A7B", + "lastKnownWriteTime" : 1488015924, + "last_content_update" : 1488015924767, + "path" : "~/GitHub/hyfo/R/getPreciBar(generic).R", + "project_path" : 
"R/getPreciBar(generic).R", + "properties" : { + }, + "relative_order" : 7, + "source_on_save" : false, + "source_window" : "", + "type" : "r_source" +} \ No newline at end of file diff --git a/.Rproj.user/D53FD3E6/sdb/per/t/67AB4AD7 b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/6FE223B similarity index 82% rename from .Rproj.user/D53FD3E6/sdb/per/t/67AB4AD7 rename to .Rproj.user/D1D10CF6/sdb/s-DA33EA29/6FE223B index d2fe7b0..e04b29e 100644 --- a/.Rproj.user/D53FD3E6/sdb/per/t/67AB4AD7 +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/6FE223B @@ -1,18 +1,20 @@ { - "contents" : "# \n#' An S4 class, representing the biasFactor of single time series biasCorrection.\n#' @slot biasFactor list of biasFactor, containing all the information for computing.\n#' @slot method the biascorrection method\n#' @slot preci if the data is precipitation\n#' @slot scaleType 'Valid when 'scaling' method is selected, 'multi' or 'add'.\n#' @slot extrapolate Valid when 'eqm' method is selected, 'constant' or 'no'\n#' @slot memberDim members contained.\n#' @slot prThreshold precipitation threshold, under which the precipitation is considered as 0.\n#' @exportClass biasFactor\n#' @importFrom methods setClass\nsetClass(\"biasFactor\", representation(biasFactor = 'list', method = 'character', preci = 'logical', prThreshold = 'numeric',\n scaleType = 'character', extrapolate = 'character', memberDim = 'numeric'), \n validity = checkBiasFactor, \n prototype(memberDim = 1))\n# \n# \n#' An S4 class, representing the biasFactor of hyfo file.\n#' @slot lonLatDim lists of biasFactor\n#' @inheritParams biasFactor\nsetClass(\"biasFactor.hyfo\", representation(lonLatDim = 'integer'), contains = 'biasFactor', \n validity = checkBiasFactor.hyfo)\n\n\n\n\n\n\n# aa <- new('biasFactor', biasFactor = biasFactor[[1]], method = biasFactor$method, preci = biasFactor$preci, prThreshold = biasFactor$prThreshold,\n# scaleType = biasFactor$scaleType, extrapolate = biasFactor$extrapolate)\n\n# a <- new('biasFactor.multiMember', 
biasFactor = biasFactor[[1]], memberDim = biasFactor$memberDim,\n# method = biasFactor$method, preci = biasFactor$preci, prThreshold = biasFactor$prThreshold,\n# scaleType = biasFactor$scaleType, extrapolate = biasFactor$extrapolate, input = biasFactor$input)\n# \n# a <- new('biasFactor.hyfo.multiMember', biasFactor = biasFactor[[1]], memberDim = biasFactor$memberDim, lonLatDim = biasFactor$lonLatDim,\n# method = biasFactor$method, preci = biasFactor$preci, prThreshold = biasFactor$prThreshold,\n# scaleType = biasFactor$scaleType, extrapolate = biasFactor$extrapolate, input = biasFactor$input)\n# \n\n\n\n\n\n\n\n##### For hyfo class\n\n###### hyfo\n\n# Since hyfo has to inateract with other packages like downscaleR,\n# If particular class is defined, other packages may not be able to use the object.\n# So, for grid file, just keep it the list file. In future, if interpolate is added,\n# grid file may become a special class.\n\n# \n# \n# \n# checkHyfo <- function(object) {\n# errors <- character()\n# if (length(object@varName) == 0) {\n# msg <- 'hyfo must have a varName.'\n# errors <- c(errors, msg)\n# }\n# \n# if (length(object@xyCoords) != 2) {\n# msg <- 'hyfo must have x and y coordinats, stored in xyCooords.'\n# errors <- c(errors, msg)\n# }\n# \n# if (length(object@Data) == 0) {\n# msg <- 'hyfo must have a Data part, storing data.'\n# errors <- c(errors, msg)\n# } else {\n# validDim <- na.omit(match(c('lon', 'lat', 'time'),attributes(object@Data)$dimensions))\n# if (length(validDim) != 3) {\n# msg <- paste('Data should have at least dimensions \"lon\", \"lat\", \"time\".', '\\n',\n# 'Your input data has dimensions ', attributes(object@Data)$dimensions, sep = '')\n# errors <- c(errors, msg)\n# }\n# }\n# if (length(errors) == 0) TRUE else errors\n# }\n# \n# checkHyfo.multiMember <- function(object) {\n# errors <- character()\n# if (length(object@Members) == 0) {\n# msg <- 'Members names missing.'\n# errors <- c(errors, msg)\n# }\n# \n# memDim <- match('member', 
attributes(object@Data)$dimensions)\n# if (is.na(memDim)) {\n# msg <- 'Members dimension missing.'\n# errors <- c(errors, msg)\n# }\n# \n# if (length(errors) == 0) TRUE else errors\n# }\n\n\n\n\n\n# #' An S4 class representing the grid file loaded from netCDF file.\n# #' @slot varName the name of the varialbe of the hyfo object.\n# #' @slot xyCoords A list file containing longitude and latitude coordinates.\n# #' @slot Dates A list containing Date information.\n# #' @slot Data An array containing the data.\n# #' @slot Loaded An character showing the loading information. \n# #' @exportClass \n# setClass(\"hyfo\", representation(varName = \"character\", xyCoords = 'list', Dates = 'list',\n# Data = 'array', Loaded = 'character'),\n# prototype(Loaded = 'by hyfo package, http://yuanchao-xu.github.io/hyfo/'),\n# validity = checkHyfo)\n# \n# \n# #' An S4 class representing the multi-member grid file loaded from netCDF file.\n# #' @slot Members showing the name of the members.\n# #' @exportClass \n# setClass('hyfo.multiMember', representation(Members = 'array'), contains = 'hyfo',\n# validity = checkHyfo.multiMember)\n\n\n\n\n# \n# a <- new(\"hyfo\", varName = \"pr\", xyCoords = tgridData$xyCoords, Dates = tgridData$Dates, Data = tgridData$Data)\n# \n# a <- new(\"hyfo.multiMember\", varName = \"pr\", xyCoords = nc$xyCoords, Dates = nc$Dates, Data = nc$Data,\n# Members = nc$Members, Loaded = nc$Loaded)\n\n", - "created" : 1449959862664.000, + "collab_server" : "", + "contents" : "# \n#' An S4 class, representing the biasFactor of single time series biasCorrection.\n#' @slot biasFactor list of biasFactor, containing all the information for computing.\n#' @slot method the biascorrection method\n#' @slot preci if the data is precipitation\n#' @slot scaleType 'Valid when 'scaling' method is selected, 'multi' or 'add'.\n#' @slot extrapolate Valid when 'eqm' method is selected, 'constant' or 'no'\n#' @slot memberDim members contained.\n#' @slot prThreshold precipitation 
threshold, under which the precipitation is considered as 0.\n#' @exportClass biasFactor\n#' @importFrom methods setClass\nsetClass(\"biasFactor\", representation(biasFactor = 'list', method = 'character', preci = 'logical', prThreshold = 'numeric',\n scaleType = 'character', extrapolate = 'character', memberDim = 'numeric'), \n validity = checkBiasFactor, \n prototype(memberDim = 1))\n# \n# \n#' An S4 class, representing the biasFactor of hyfo file.\n#' @slot lonLatDim lists of biasFactor\n#' @inheritParams biasFactor\nsetClass(\"biasFactor.hyfo\", representation(lonLatDim = 'integer'), contains = 'biasFactor', \n validity = checkBiasFactor.hyfo)\n\n\n\n\n\n\n# aa <- new('biasFactor', biasFactor = biasFactor[[1]], method = biasFactor$method, preci = biasFactor$preci, prThreshold = biasFactor$prThreshold,\n# scaleType = biasFactor$scaleType, extrapolate = biasFactor$extrapolate)\n\n# a <- new('biasFactor.multiMember', biasFactor = biasFactor[[1]], memberDim = biasFactor$memberDim,\n# method = biasFactor$method, preci = biasFactor$preci, prThreshold = biasFactor$prThreshold,\n# scaleType = biasFactor$scaleType, extrapolate = biasFactor$extrapolate, input = biasFactor$input)\n# \n# a <- new('biasFactor.hyfo.multiMember', biasFactor = biasFactor[[1]], memberDim = biasFactor$memberDim, lonLatDim = biasFactor$lonLatDim,\n# method = biasFactor$method, preci = biasFactor$preci, prThreshold = biasFactor$prThreshold,\n# scaleType = biasFactor$scaleType, extrapolate = biasFactor$extrapolate, input = biasFactor$input)\n# \n\n\n\n\n\n\n\n##### For hyfo class\n\n###### hyfo\n\n# Since hyfo has to inateract with other packages like downscaleR,\n# If particular class is defined, other packages may not be able to use the object.\n# So, for grid file, just keep it the list file. 
In future, if interpolate is added,\n# grid file may become a special class.\n\n# \n# \n# \n# checkHyfo <- function(object) {\n# errors <- character()\n# if (length(object@varName) == 0) {\n# msg <- 'hyfo must have a varName.'\n# errors <- c(errors, msg)\n# }\n# \n# if (length(object@xyCoords) != 2) {\n# msg <- 'hyfo must have x and y coordinats, stored in xyCooords.'\n# errors <- c(errors, msg)\n# }\n# \n# if (length(object@Data) == 0) {\n# msg <- 'hyfo must have a Data part, storing data.'\n# errors <- c(errors, msg)\n# } else {\n# validDim <- na.omit(match(c('lon', 'lat', 'time'),attributes(object@Data)$dimensions))\n# if (length(validDim) != 3) {\n# msg <- paste('Data should have at least dimensions \"lon\", \"lat\", \"time\".', '\\n',\n# 'Your input data has dimensions ', attributes(object@Data)$dimensions, sep = '')\n# errors <- c(errors, msg)\n# }\n# }\n# if (length(errors) == 0) TRUE else errors\n# }\n# \n# checkHyfo.multiMember <- function(object) {\n# errors <- character()\n# if (length(object@Members) == 0) {\n# msg <- 'Members names missing.'\n# errors <- c(errors, msg)\n# }\n# \n# memDim <- match('member', attributes(object@Data)$dimensions)\n# if (is.na(memDim)) {\n# msg <- 'Members dimension missing.'\n# errors <- c(errors, msg)\n# }\n# \n# if (length(errors) == 0) TRUE else errors\n# }\n\n\n\n\n\n# #' An S4 class representing the grid file loaded from netCDF file.\n# #' @slot varName the name of the varialbe of the hyfo object.\n# #' @slot xyCoords A list file containing longitude and latitude coordinates.\n# #' @slot Dates A list containing Date information.\n# #' @slot Data An array containing the data.\n# #' @slot Loaded An character showing the loading information. 
\n# #' @exportClass \n# setClass(\"hyfo\", representation(varName = \"character\", xyCoords = 'list', Dates = 'list',\n# Data = 'array', Loaded = 'character'),\n# prototype(Loaded = 'by hyfo package, https://yuanchao-xu.github.io/hyfo/'),\n# validity = checkHyfo)\n# \n# \n# #' An S4 class representing the multi-member grid file loaded from netCDF file.\n# #' @slot Members showing the name of the members.\n# #' @exportClass \n# setClass('hyfo.multiMember', representation(Members = 'array'), contains = 'hyfo',\n# validity = checkHyfo.multiMember)\n\n\n\n\n# \n# a <- new(\"hyfo\", varName = \"pr\", xyCoords = tgridData$xyCoords, Dates = tgridData$Dates, Data = tgridData$Data)\n# \n# a <- new(\"hyfo.multiMember\", varName = \"pr\", xyCoords = nc$xyCoords, Dates = nc$Dates, Data = nc$Data,\n# Members = nc$Members, Loaded = nc$Loaded)\n\n", + "created" : 1488015182774.000, "dirty" : false, "encoding" : "ASCII", "folds" : "", - "hash" : "3466314913", - "id" : "67AB4AD7", - "lastKnownWriteTime" : 1446235115, - "path" : "E:/1/R/hyfo/R/classes.R", + "hash" : "2581869872", + "id" : "6FE223B", + "lastKnownWriteTime" : 1488015188, + "last_content_update" : 1488015188093, + "path" : "~/GitHub/hyfo/R/classes.R", "project_path" : "R/classes.R", "properties" : { - "tempName" : "Untitled1" }, - "relative_order" : 11, + "relative_order" : 50, "source_on_save" : false, + "source_window" : "", "type" : "r_source" } \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/80C39737 b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/80C39737 new file mode 100644 index 0000000..e65cbe9 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/80C39737 @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "---\ntitle: '[hyfo Easy Start](https://yuanchao-xu.github.io/hyfo/)'\nauthor: '[Yuanchao Xu](https://dk.linkedin.com/in/xuyuanchao37)'\ndate: '`r Sys.Date()`'\noutput: \n pdf_document:\n toc: yes\n toc_depth: 3\n html_document:\n toc: yes\nvignette: > \n 
%\\VignetteIndexEntry{hyfo easy start} \n %\\VignetteEngine{knitr::rmarkdown}\n %\\VignetteEncoding{ASCII}\n---\n\n# Introduction\n\n**Official Website is [https://yuanchao-xu.github.io/hyfo](http://yuanchao-xu.github.io/hyfo), where manuals and more details can be found.**\n\nhyfo is an R package, initially designed for the European Project EUPORIAS, and cooperated with DHI Denmark, which was then extended to other uses in hydrology, hydraulics and climate.\n\nThis package mainly focuses on data process and visulization in hydrology and climate forecasting. Main function includes NetCDF file processing, data extraction, data downscaling, data resampling, gap filler of precipitation, bias correction of forecasting data, flexible time series plot, and spatial map generation. It is a good pre-processing and post-processing tool for hydrological and hydraulic modellers.\n\n**If you feel hyfo is of a little help, please cite it as following:**\n\nXu, Yuanchao(2015). hyfo: Hydrology and Climate Forecasting R Package for Data Analysis and Visualization. Retrieved from https://yuanchao-xu.github.io/hyfo/\n\n\n#### TIPS\n* For the hydrology tools part, the minimum time unit is a day, i.e., it mainly focuses on water resource and some long term analysis. For flood analysis part, it will be added in future.\n\n\n* One important characteristic by which hyfo can be distinguished from others is its convenience in multiple plots and series plots. Most data visualization tool in hyfo provides the output that can be directly re-plot by `ggplot2`, if `output = 'ggplot'` is assigned in the argument of the function, which will be easier for the users to generated series/multiple plots afterwards. When `output = 'ggplot'` is selected, you also have to assigne a `name = 'yourname'` in the argument, for the convenience of generating multiplots in future. All the functions ending with `_comb` can generated series/multiple plots, details can be found in the user mannual. 
\n\n\n* For the forecasting tools part, `hyfo` mainly focuses on the post processing of the gridData derived from forecasts or other sources. The input is a list file, usually an NetCDF file. There are `getNcdfVar()`, `loadNcdf()` and `writeNcdf()` prepared in hyfo, for you to deal with NetCDF file. \n\n* If you don't like the tile, x axis, y axis of the plot, just set them as '', e.g. `title = ''`\n\n* For R beginners, R provides different functions to write to file. `write.table` is a popular choice, and after write the results to a file, you can directly copy paste to your model or to other uses.\n\n* The functions end with `_anarbe` are the functions designed specially for some case in Spain, those functions mostly are about data collection of the anarbe catchment, which will be introduced in the end of this mannual.\n", + "created" : 1488018162434.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "1977312637", + "id" : "80C39737", + "lastKnownWriteTime" : 1488018183, + "last_content_update" : 1488018183932, + "path" : "~/GitHub/hyfo/vignettes/hyfo.Rmd", + "project_path" : "vignettes/hyfo.Rmd", + "properties" : { + }, + "relative_order" : 56, + "source_on_save" : false, + "source_window" : "", + "type" : "r_markdown" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/819D4E19 b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/819D4E19 new file mode 100644 index 0000000..9f9767e --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/819D4E19 @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "% Generated by roxygen2: do not edit by hand\n% Please edit documentation in R/collectData.R\n\\name{collectData}\n\\alias{collectData}\n\\title{Collect data from different csv files.}\n\\usage{\ncollectData(folderName, fileType = NULL, range = NULL, sheetIndex = 1)\n}\n\\arguments{\n\\item{folderName}{A string showing the path of the folder holding different csv files.}\n\n\\item{fileType}{A string showing the file type, 
e.g. \"txt\", \"csv\", \"excel\".}\n\n\\item{range}{A vector containing startRow, endRow, startColumn, endColumn, e.g., \nc(2,15,2,3)}\n\n\\item{sheetIndex}{A number showing the sheetIndex in the excel file, if fileType is excel,\nsheetIndex has to be provided, default is 1.}\n}\n\\value{\nThe collected data from different files in the folder.\n}\n\\description{\nCollect data from different csv files.\n}\n\\examples{\n\n#use internal data as an example.\nfolder <- file.path(path.package(\"hyfo\"), 'extdata')\n# file may vary with different environment, it if doesn't work, use local way to get\n# folder path.\n\na <- collectData(folder, fileType = 'csv', range = c(10, 20, 1,2))\n\n# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n\n}\n\n", + "created" : 1487955954255.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "1044217710", + "id" : "819D4E19", + "lastKnownWriteTime" : 1488018205, + "last_content_update" : 1488018205, + "path" : "~/GitHub/hyfo/man/collectData.Rd", + "project_path" : "man/collectData.Rd", + "properties" : { + }, + "relative_order" : 22, + "source_on_save" : false, + "source_window" : "", + "type" : "r_doc" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/882400E4 b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/882400E4 new file mode 100644 index 0000000..0527428 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/882400E4 @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "\n\n\n\n#' Get bias factor for multi/operational/real time bias correction.\n#' \n#' When you do multi/operational/real time bias correction. It's too expensive\n#' to input hindcast and obs every time. Especially when you have a long period of hindcast\n#' and obs, but only a short period of frc, it's too unecessary to read and compute hindcast\n#' and obs everytime. Therefore, biasFactor is designed. 
Using \\code{getBiasFactor}, you can\n#' get the biasFactor with hindcast and observation, then you can use \\code{applyBiasFactor} to \n#' apply the biasFactor to different forecasts. \n#' \n#' @param hindcast a hyfo grid data output or a dataframe(time series) consists of Date column and one or more value columns, \n#' representing the hindcast data. This data will be used in the calibration of the forecast, so it's better to have the same date period as\n#' observation data. Check details for more information.\n#' @param obs a hyfo grid data output or a dataframe (time series) consists of Date column and one or more value columns, \n#' representing the observation data.\n#' @param method bias correct method, including 'delta', 'scaling'...,default method is 'scaling'.\n#' @param scaleType only when the method \"scaling\" is chosen, scaleType will be available. Two different types\n#' of scaling method, 'add' and 'multi', which means additive and multiplicative scaling method, default is 'multi'. More info check \n#' details.\n#' @param preci If the precipitation is biascorrected, then you have to assign \\code{preci = TRUE}. Since for\n#' precipitation, some biascorrect methods may not apply to, or some methods are specially for precipitation. \n#' Default is FALSE, refer to details.\n#' @param prThreshold The minimum value that is considered as a non-zero precipitation. Default to 1 (assuming mm).\n#' @param extrapolate When use 'eqm' method, and 'no' is set, modified frc is bounded by the range of obs.\n#' If 'constant' is set, modified frc is not bounded by the range of obs. 
Default is 'no'.\n#' \n#' @seealso \\code{\\link{biasCorrect}} for method used in bias correction.\n#' \\code{\\link{applyBiasFactor}}, for the second part.\n#' \n#' @details \n#' \n#' Information about the method and how biasCorrect works can be found in \\code{\\link{biasCorrect}}\n#' \n#' \\strong{why use biasFactor}\n#' \n#' As for forecasting, for daily data, there is usually no need to have\n#' different bias factor every different day. You can calculate one bisa factor using a long\n#' period of hindcast and obs, and apply that factor to different frc.\n#' \n#' For example,\n#' \n#' You have 10 years of hindcast and observation. you want to do bias correction for some \n#' forecasting product, e.g. system 4. For system 4, each month, you will get a new forecast\n#' about the future 6 months. So if you want to do the real time bias correction, you have to\n#' take the 10 years of hindcast and observation data with you, and run \\code{biasCorrect} every\n#' time you get a new forecast. That's too expensive.\n#' \n#' For some practical use in forecasting, there isn't a so high demand for accuracy. E.g.,\n#' Maybe for February and March, you can use the same biasFactor, no need to do the computation \n#' again. \n#' \n#' It is a generic function, if in your case you need to debug, please see \\code{?debug()} \n#' for how to debug S4 method. 
\n#' \n#' @examples \n#' \n#' ######## hyfo grid file biascorrection\n#' ########\n#' \n#' # If your input is obtained by \\code{loadNcdf}, you can also directly biascorrect\n#' # the file.\n#' \n#' # First load ncdf file.\n#' filePath <- system.file(\"extdata\", \"tnc.nc\", package = \"hyfo\")\n#' varname <- getNcdfVar(filePath) \n#' nc <- loadNcdf(filePath, varname)\n#' \n#' data(tgridData)\n#' # Since the example data, has some NA values, the process will include some warning #message, \n#' # which can be ignored in this case.\n#' \n#' \n#' \n#' # Then we will use nc data as forecasting data, and use itself as hindcast data,\n#' # use tgridData as observation.\n#' \n#' biasFactor <- getBiasFactor(nc, tgridData)\n#' newFrc <- applyBiasFactor(nc, biasFactor)\n#' \n#' biasFactor <- getBiasFactor(nc, tgridData, method = 'eqm', extrapolate = 'constant',\n#' preci = TRUE)\n#' # This method needs obs input.\n#' newFrc <- applyBiasFactor(nc, biasFactor, obs = tgridData)\n#' \n#' biasFactor <- getBiasFactor(nc, tgridData, method = 'gqm', preci = TRUE)\n#' newFrc <- applyBiasFactor(nc, biasFactor) \n#' \n#' \n#' ######## Time series biascorrection\n#' ########\n#' \n#' # Use the time series from testdl as an example, we take frc, hindcast and obs from testdl.\n#' data(testdl)\n#' \n#' # common period has to be extracted in order to better train the forecast.\n#' \n#' datalist <- extractPeriod(testdl, startDate = '1994-1-1', endDate = '1995-10-1')\n#' \n#' frc <- datalist[[1]]\n#' hindcast <- datalist[[2]]\n#' obs <- datalist[[3]]\n#' \n#' \n#' # The data used here is just for example, so there could be negative data.\n#' \n#' # default method is scaling\n#' biasFactor <- getBiasFactor(hindcast, obs)\n#' frc_new <- applyBiasFactor(frc, biasFactor)\n#' \n#' # for precipitation data, extra process needs to be executed, so you have to tell\n#' # the program to it is a precipitation data.\n#' \n#' biasFactor <- getBiasFactor(hindcast, obs, preci = TRUE)\n#' frc_new1 <- 
applyBiasFactor(frc, biasFactor)\n#' \n#' # You can use other methods to biascorrect, e.g. delta method. \n#' biasFactor <- getBiasFactor(hindcast, obs, method = 'delta')\n#' # delta method needs obs input.\n#' frc_new2 <- applyBiasFactor(frc, biasFactor, obs = obs)\n#' \n#' # \n#' biasFactor <- getBiasFactor(hindcast, obs, method = 'eqm', preci = TRUE)\n#' # eqm needs obs input\n#' frc_new3 <- applyBiasFactor(frc, biasFactor, obs = obs)\n#' \n#' biasFactor <- getBiasFactor(hindcast, obs, method = 'gqm', preci = TRUE)\n#' frc_new4 <- applyBiasFactor(frc, biasFactor)\n#' \n#' plotTS(obs, frc, frc_new, frc_new1, frc_new2, frc_new3, frc_new4, plot = 'cum')\n#' \n#' # You can also give name to this input list.\n#' TSlist <- list(obs, frc, frc_new, frc_new1, frc_new2, frc_new3, frc_new4)\n#' names(TSlist) <- c('obs', 'frc', 'delta', 'delta_preci', 'scale', 'eqm', 'gqm')\n#' plotTS(list = TSlist, plot = 'cum')\n#' \n#' \n#' # If the forecasts you extracted only has incontinuous data for certain months and years, e.g.,\n#' # for seasonal forecasting, forecasts only provide 3-6 months data, so the case can be \n#' # for example Dec, Jan and Feb of every year from year 1999-2005.\n#' # In such case, you need to extract certain months and years from observed time series.\n#' # extractPeriod() can be then used.\n#' \n#' \n#'\n#'\n#'\n#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n#' \n#' \n#' @references \n#' Bias correction methods come from \\code{biasCorrection} from \\code{dowscaleR}\n#' \n#' \\itemize{\n#' \n#' \\item Santander Meteorology Group (2015). downscaleR: Climate data manipulation and statistical downscaling. R\n#' package version 0.6-0. https://github.com/SantanderMetGroup/downscaleR/wiki\n#' \n#' \\item R.A.I. Wilcke, T. Mendlik and A. Gobiet (2013) Multi-variable error correction of regional climate models. Climatic Change, 120, 871-887\n#' \n#' \\item A. Amengual, V. Homar, R. Romero, S. Alonso, and C. 
Ramis (2012) A Statistical Adjustment of Regional Climate Model Outputs to Local Scales: Application to Platja de Palma, Spain. J. Clim., 25, 939-957\n#' \n#' \\item C. Piani, J. O. Haerter and E. Coppola (2009) Statistical bias correction for daily precipitation in regional climate models over Europe, Theoretical and Applied Climatology, 99, 187-192\n#' \n#' \\item O. Gutjahr and G. Heinemann (2013) Comparing precipitation bias correction methods for high-resolution regional climate simulations using COSMO-CLM, Theoretical and Applied Climatology, 114, 511-529\n#' }\n#' \n#' @author Yuanchao Xu \\email{xuyuanchao37@@gmail.com }\n#' \n#' @importFrom methods setMethod\n#' @export\n#' \n#' \n# debug by trace(\"getBiasFactor\", browser, exit=browser, signature = c(\"list\", \"list\"))\nsetGeneric('getBiasFactor', function(hindcast, obs, method = 'scaling', scaleType = 'multi', \n preci = FALSE, prThreshold = 0, extrapolate = 'no') {\n standardGeneric('getBiasFactor')\n})\n\n#' @rdname getBiasFactor\nsetMethod('getBiasFactor', signature('data.frame', 'data.frame'), \n function(hindcast, obs, method, scaleType, preci, prThreshold, extrapolate) {\n result <- getBiasFactor.TS(hindcast, obs, method, scaleType, preci, prThreshold, extrapolate)\n return(result)\n })\n\n\n# This is for the grid file from downscaleR\n#' @rdname getBiasFactor\n#' @importFrom methods new\nsetMethod('getBiasFactor', signature('list', 'list'), \n function(hindcast, obs, method, scaleType, preci, prThreshold, extrapolate) {\n result <- getBiasFactor.list(hindcast, obs, method, scaleType, preci, prThreshold, extrapolate)\n return(result)\n })\n\n\n\n\n#' Apply bias factor to different forecasts for multi/operational/real time bias correction.\n#' \n#' When you do multi/operational/real time bias correction. It's too expensive\n#' to input hindcast and obs every time. 
Especially when you have a long period of hindcast\n#' and obs, but only a short period of frc, it's too unecessary to read and compute hindcast\n#' and obs everytime. Therefore, biasFactor is designed. Using \\code{getBiasFactor}, you can\n#' get the biasFactor with hindcast and observation, then you can use \\code{applyBiasFactor} to \n#' apply the biasFactor to different forecasts. \n#' \n#' @param frc a hyfo grid data output or a dataframe(time series) consists of Date column and one or more value columns, \n#' representing the frc data. Check details for more information.\n#' @param biasFactor a file containing all the information of the calibration, will be\n#' applied to different forecasts.\n#' @param obs for some methods, observation input is necessary. obs is a hyfo grid data output or a dataframe (time series) consists of Date column and one or more value columns, \n#' representing the observation data. Default value is NULL.\n#' @seealso \\code{\\link{biasCorrect}} for method used in bias correction. \n#' \\code{\\link{getBiasFactor}}, for the first part.\n#' \n#' @details \n#' \n#' Information about the method and how biasCorrect works can be found in \\code{\\link{biasCorrect}}\n#' \n#' \\strong{why use biasFactor}\n#' \n#' As for forecasting, for daily data, there is usually no need to have\n#' different bias factor every different day. You can calculate one bisa factor using a long\n#' period of hindcast and obs, and apply that factor to different frc.\n#' \n#' For example,\n#' \n#' You have 10 years of hindcast and observation. you want to do bias correction for some \n#' forecasting product, e.g. system 4. For system 4, each month, you will get a new forecast\n#' about the future 6 months. So if you want to do the real time bias correction, you have to\n#' take the 10 years of hindcast and observation data with you, and run \\code{biasCorrect} every\n#' time you get a new forecast. 
That's too expensive.\n#' \n#' For some practical use in forecasting, there isn't a so high demand for accuracy. E.g.,\n#' Maybe for February and March, you can use the same biasFactor, no need to do the computation \n#' again. \n#' \n#' \n#' It is a generic function, if in your case you need to debug, please see \\code{?debug()} \n#' for how to debug S4 method.\n#' \n#' @examples \n#' \n#' ######## hyfo grid file biascorrection\n#' ########\n#' \n#' # If your input is obtained by \\code{loadNcdf}, you can also directly biascorrect\n#' # the file.\n#' \n#' # First load ncdf file.\n#' filePath <- system.file(\"extdata\", \"tnc.nc\", package = \"hyfo\")\n#' varname <- getNcdfVar(filePath) \n#' nc <- loadNcdf(filePath, varname)\n#' \n#' data(tgridData)\n#' #' # Since the example data, has some NA values, the process will include some warning #message, \n#' # which can be ignored in this case.\n#' \n#' \n#' \n#' # Then we will use nc data as forecasting data, and use itself as hindcast data,\n#' # use tgridData as observation.\n#' \n#' biasFactor <- getBiasFactor(nc, tgridData)\n#' newFrc <- applyBiasFactor(nc, biasFactor)\n#' \n#' biasFactor <- getBiasFactor(nc, tgridData, method = 'eqm', extrapolate = 'constant',\n#' preci = TRUE)\n#' # This method needs obs input.\n#' newFrc <- applyBiasFactor(nc, biasFactor, obs = tgridData)\n#' \n#' biasFactor <- getBiasFactor(nc, tgridData, method = 'gqm', preci = TRUE)\n#' newFrc <- applyBiasFactor(nc, biasFactor) \n#' \n#' \n#' ######## Time series biascorrection\n#' ########\n#' \n#' # Use the time series from testdl as an example, we take frc, hindcast and obs from testdl.\n#' data(testdl)\n#' \n#' # common period has to be extracted in order to better train the forecast.\n#' \n#' datalist <- extractPeriod(testdl, startDate = '1994-1-1', endDate = '1995-10-1')\n#' \n#' frc <- datalist[[1]]\n#' hindcast <- datalist[[2]]\n#' obs <- datalist[[3]]\n#' \n#' \n#' # The data used here is just for example, so there could be negative 
data.\n#' \n#' # default method is scaling\n#' biasFactor <- getBiasFactor(hindcast, obs)\n#' frc_new <- applyBiasFactor(frc, biasFactor)\n#' \n#' # for precipitation data, extra process needs to be executed, so you have to tell\n#' # the program to it is a precipitation data.\n#' \n#' biasFactor <- getBiasFactor(hindcast, obs, preci = TRUE)\n#' frc_new1 <- applyBiasFactor(frc, biasFactor)\n#' \n#' # You can use other methods to biascorrect, e.g. delta method. \n#' biasFactor <- getBiasFactor(hindcast, obs, method = 'delta')\n#' # delta method needs obs input.\n#' frc_new2 <- applyBiasFactor(frc, biasFactor, obs = obs)\n#' \n#' # \n#' biasFactor <- getBiasFactor(hindcast, obs, method = 'eqm', preci = TRUE)\n#' # eqm needs obs input\n#' frc_new3 <- applyBiasFactor(frc, biasFactor, obs = obs)\n#' \n#' biasFactor <- getBiasFactor(hindcast, obs, method = 'gqm', preci = TRUE)\n#' frc_new4 <- applyBiasFactor(frc, biasFactor)\n#' \n#' plotTS(obs, frc, frc_new, frc_new1, frc_new2, frc_new3, frc_new4, plot = 'cum')\n#' \n#' # You can also give name to this input list.\n#' TSlist <- list(obs, frc, frc_new, frc_new1, frc_new2, frc_new3, frc_new4)\n#' names(TSlist) <- c('obs', 'frc', 'delta', 'delta_preci', 'scale', 'eqm', 'gqm')\n#' plotTS(list = TSlist, plot = 'cum')\n#' \n#' \n#' # If the forecasts you extracted only has incontinuous data for certain months and years, e.g.,\n#' # for seasonal forecasting, forecasts only provide 3-6 months data, so the case can be \n#' # for example Dec, Jan and Feb of every year from year 1999-2005.\n#' # In such case, you need to extract certain months and years from observed time series.\n#' # extractPeriod() can be then used.\n#' \n#' \n#'\n#'\n#'\n#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n#' \n#' \n#' @references \n#' Bias correction methods come from \\code{biasCorrection} from \\code{dowscaleR}\n#' \n#' \\itemize{\n#' \n#' \\item Santander Meteorology Group (2015). 
downscaleR: Climate data manipulation and statistical downscaling. R\n#' package version 0.6-0. https://github.com/SantanderMetGroup/downscaleR/wiki\n#' \n#' \\item R.A.I. Wilcke, T. Mendlik and A. Gobiet (2013) Multi-variable error correction of regional climate models. Climatic Change, 120, 871-887\n#' \n#' \\item A. Amengual, V. Homar, R. Romero, S. Alonso, and C. Ramis (2012) A Statistical Adjustment of Regional Climate Model Outputs to Local Scales: Application to Platja de Palma, Spain. J. Clim., 25, 939-957\n#' \n#' \\item C. Piani, J. O. Haerter and E. Coppola (2009) Statistical bias correction for daily precipitation in regional climate models over Europe, Theoretical and Applied Climatology, 99, 187-192\n#' \n#' \\item O. Gutjahr and G. Heinemann (2013) Comparing precipitation bias correction methods for high-resolution regional climate simulations using COSMO-CLM, Theoretical and Applied Climatology, 114, 511-529\n#' }\n#' \n#' @author Yuanchao Xu \\email{xuyuanchao37@@gmail.com }\n#' \n#' @export\nsetGeneric('applyBiasFactor', function(frc, biasFactor, obs = NULL) {\n standardGeneric('applyBiasFactor')\n})\n\n#' @rdname applyBiasFactor\n#' @importFrom methods setMethod\nsetMethod('applyBiasFactor', signature('data.frame', 'biasFactor'), \n function(frc, biasFactor, obs) {\n result <- applyBiasFactor.TS(frc, biasFactor, obs)\n return(result)\n })\n \n#' @rdname applyBiasFactor\n#' @importFrom methods setMethod\nsetMethod('applyBiasFactor', signature('list', 'biasFactor.hyfo'), \n function(frc, biasFactor, obs) {\n result <- applyBiasFactor.list(frc, biasFactor, obs)\n return(result)\n })\n\n\n### generic functions\ngetBiasFactor.TS <- function(hindcast, obs, method, scaleType, preci, prThreshold, extrapolate) {\n \n if (!grepl('-|/', obs[1, 1]) | !grepl('-|/', hindcast[1, 1])) {\n stop('First column is not date or Wrong Date formate, check the format in ?as.Date{base} \n and use as.Date to convert.If your input is a hyfo dataset, put input = \"hyfo\" as 
an\n argument, check help for more info.')\n }\n \n # change to date type is easier, but in case in future the flood part is added, Date type doesn't have\n # hour, min and sec, so, it's better to convert it into POSIxlt.\n \n # if condition only accepts one condition, for list comparison, there are a lot of conditions, better\n # further process it, like using any.\n if (any(as.POSIXlt(hindcast[, 1]) != as.POSIXlt(obs[, 1]))) {\n warning('time of obs and time of hindcast are not the same, which may cause inaccuracy in \n the calibration.')\n }\n n <- ncol(hindcast)\n \n # For every column, it's biascorrected respectively.\n biasFactor <- lapply(2:n, function(x) getBiasFactor_core(hindcast[, x], obs[, 2], method = method,\n scaleType = scaleType, preci = preci, prThreshold = prThreshold, \n extrapolate = extrapolate))\n if (n - 1 > 1) {\n biasFactor_all <- new('biasFactor.multiMember', biasFactor = biasFactor, memberDim = n - 1,\n method = method, preci = preci, prThreshold = prThreshold, scaleType = scaleType, \n extrapolate = extrapolate)\n \n } else {\n biasFactor_all <- new('biasFactor', biasFactor = biasFactor, method = method, \n preci = preci, prThreshold = prThreshold, scaleType = scaleType, \n extrapolate = extrapolate)\n }\n \n return(biasFactor_all)\n}\n\ngetBiasFactor.list <- function(hindcast, obs, method, scaleType, preci, prThreshold, extrapolate) {\n \n ## Check if the data is a hyfo grid data.\n checkHyfo(hindcast, obs)\n \n hindcastData <- hindcast$Data\n obsData <- obs$Data\n \n ## save frc dimension order, at last, set the dimension back to original dimension\n hindcastDim <- attributes(hindcastData)$dimensions\n \n ## ajust the dimension into general dimension order.\n obsData <- adjustDim(obsData, ref = c('lon', 'lat', 'time'))\n \n ## CheckDimLength, check if all the input dataset has different dimension length\n # i.e. 
if they all have the same lon and lat number.\n checkDimLength(hindcastData, obsData, dim = c('lon', 'lat'))\n \n \n # Now real bias correction is executed.\n \n memberIndex <- grepAndMatch('member', attributes(hindcastData)$dimensions)\n \n # For dataset that has a member part \n if (!is.na(memberIndex)) {\n \n hindcastData <- adjustDim(hindcastData, ref = c('lon', 'lat', 'time', 'member'))\n \n # The following code may speed up because it doesn't use for loop.\n # It firstly combine different array into one array. combine the time \n # dimension of frc, hindcast and obs. Then use apply, each time extract \n # the total time dimension, and first part is frc, second is hindcast, third\n # is obs. Then use these three parts to bias correct. All above can be written\n # in one function and called within apply. But too complicated to understand,\n # So save it for future use maybe.\n \n # for (member in 1:dim(frcData)[4]) {\n # totalArr <- array(c(frcData[,,, member], hindcastData[,,, member], obsData),\n # dim = c(dim(frcData)[1], dim(frcData)[2], \n # dim(frcData)[3] + dim(hindcastData)[3] + dim(obsData)[3]))\n # }\n \n biasFactor_all <- vector(mode = \"list\", length = dim(hindcastData)[4])\n for (member in 1:dim(hindcastData)[4]) {\n biasFactor_all[[member]] <- vector(mode = 'list', length = dim(hindcastData)[1])\n for (lon in 1:dim(hindcastData)[1]) {\n biasFactor_all[[member]][[lon]] <- vector(mode = 'list', length = dim(hindcastData)[2])\n for (lat in 1:dim(hindcastData)[2]) {\n biasFactor_all[[member]][[lon]][[lat]] <- getBiasFactor_core(hindcastData[lon, lat,, member], obsData[lon, lat,], method = method,\n scaleType = scaleType, preci = preci, prThreshold = prThreshold, \n extrapolate = extrapolate)\n }\n }\n }\n \n biasFactor <- new('biasFactor.hyfo', biasFactor = biasFactor_all, method = method, preci = preci,\n prThreshold = prThreshold, scaleType = scaleType, extrapolate = extrapolate, \n lonLatDim = calcuDim(hindcastData, dim = c('lon', 'lat')),\n 
memberDim = calcuDim(hindcastData, dim = 'member'))\n } else {\n \n hindcastData <- adjustDim(hindcastData, ref = c('lon', 'lat', 'time'))\n \n biasFactor_all <- vector(mode = 'list', length = dim(hindcastData)[1])\n for (lon in 1:dim(hindcastData)[1]) {\n biasFactor_all[[lon]] <- vector(mode = 'list', length = dim(hindcastData)[2]) \n for (lat in 1:dim(hindcastData)[2]) {\n biasFactor_all[[lon]][[lat]] <- getBiasFactor_core(hindcastData[lon, lat,], obsData[lon, lat,], method = method,\n scaleType = scaleType, preci = preci, prThreshold = prThreshold, \n extrapolate = extrapolate)\n }\n }\n biasFactor <- new('biasFactor.hyfo', biasFactor = biasFactor_all, method = method, preci = preci,\n prThreshold = prThreshold, scaleType = scaleType, extrapolate = extrapolate, \n lonLatDim = calcuDim(hindcastData, dim = c('lon', 'lat')))\n \n }\n \n return(biasFactor)\n}\n\napplyBiasFactor.TS <- function(frc, biasFactor, obs) {\n method <- biasFactor@method\n preci <- biasFactor@preci\n prThreshold <- biasFactor@prThreshold\n scaleType <- biasFactor@scaleType\n extrapolate <- biasFactor@extrapolate\n memberDim <- biasFactor@memberDim\n biasFactor <- biasFactor@biasFactor\n \n \n # First check if the first column is Date\n if (!grepl('-|/', frc[1, 1])) {\n stop('First column is not date or Wrong Date formate, check the format in ?as.Date{base} \n and use as.Date to convert.If your input is a hyfo dataset, put input = \"hyfo\" as an\n argument, check help for more info.')\n }\n # change to date type is easier, but in case in future the flood part is added, Date type doesn't have\n # hour, min and sec, so, it's better to convert it into POSIxlt.\n \n # In this case more than one value columns exist in the dataset, both frc and hindcast.\n \n n <- ncol(frc)\n if (n-1 != memberDim) stop('frc and biasFactor have different members.')\n \n \n # For every column, it's biascorrected respectively.\n frc_data <- lapply(2:n, function(x) applyBiasFactor_core(frc[, x], biasFactor = 
biasFactor[[x - 1]], method = method,\n scaleType = scaleType, preci = preci, prThreshold = prThreshold, \n extrapolate = extrapolate, obs = obs[, 2]))\n frc_data <- do.call('cbind', frc_data)\n rownames(frc_data) <- NULL\n \n names <- colnames(frc)\n frc_new <- data.frame(frc[, 1], frc_data)\n colnames(frc_new) <- names\n \n return(frc_new)\n \n}\n\napplyBiasFactor.list <- function(frc, biasFactor, obs) {\n method <- biasFactor@method\n preci <- biasFactor@preci\n prThreshold <- biasFactor@prThreshold\n scaleType <- biasFactor@scaleType\n extrapolate <- biasFactor@extrapolate\n lonLatDim <- biasFactor@lonLatDim\n memberDim <- biasFactor@memberDim\n biasFactor <- biasFactor@biasFactor\n \n ## Check if the data is a hyfo grid data.\n checkHyfo(frc)\n \n \n obsData <- obs$Data\n frcData <- frc$Data\n \n ## save frc dimension order, at last, set the dimension back to original dimension\n frcDim <- attributes(frcData)$dimensions\n \n ## ajust the dimension into general dimension order.\n obsData <- adjustDim(obsData, ref = c('lon', 'lat', 'time'))\n \n ## CheckDimLength, check if all the input dataset has different dimension length\n # i.e. if they all have the same lon and lat number.\n if (!identical(calcuDim(frcData, dim = c('lon', 'lat')), lonLatDim)) {\n stop('frc data has different lon and lat from hindcast data.')\n }\n \n \n # Now real bias correction is executed.\n \n memberIndex <- grepAndMatch('member', attributes(frcData)$dimensions)\n \n # For dataset that has a member part \n if (!is.na(memberIndex)) {\n # check if frcData and hindcastData has the same dimension and length.\n if (calcuDim(frcData, dim = 'member') != memberDim) {\n stop('frc data has different member number from hindcast.')\n } \n \n frcData <- adjustDim(frcData, ref = c('lon', 'lat', 'time', 'member'))\n \n # The following code may speed up because it doesn't use for loop.\n # It firstly combine different array into one array. combine the time \n # dimension of frc, hindcast and obs. 
Then use apply, each time extract \n # the total time dimension, and first part is frc, second is hindcast, third\n # is obs. Then use these three parts to bias correct. All above can be written\n # in one function and called within apply. But too complicated to understand,\n # So save it for future use maybe.\n \n # for (member in 1:dim(frcData)[4]) {\n # totalArr <- array(c(frcData[,,, member], hindcastData[,,, member], obsData),\n # dim = c(dim(frcData)[1], dim(frcData)[2], \n # dim(frcData)[3] + dim(hindcastData)[3] + dim(obsData)[3]))\n # }\n \n \n for (member in 1:dim(frcData)[4]) {\n for (lon in 1:dim(frcData)[1]) {\n for (lat in 1:dim(frcData)[2]) {\n frcData[lon, lat,, member] <- applyBiasFactor_core(frcData[lon, lat,,member], biasFactor = biasFactor[[member]][[lon]][[lat]], method = method,\n scaleType = scaleType, preci = preci, prThreshold = prThreshold, \n extrapolate = extrapolate, obs = obsData[lon, lat,])\n }\n }\n }\n } else {\n frcData <- adjustDim(frcData, ref = c('lon', 'lat', 'time'))\n for (lon in 1:dim(frcData)[1]) {\n for (lat in 1:dim(frcData)[2]) {\n frcData[lon, lat,] <- applyBiasFactor_core(frcData[lon, lat,], biasFactor = biasFactor[[lon]][[lat]], method = method,\n scaleType = scaleType, preci = preci, prThreshold = prThreshold, \n extrapolate = extrapolate, obs = obsData[lon, lat,])\n }\n }\n }\n \n frcData <- adjustDim(frcData, ref = frcDim)\n frc$Data <- frcData\n frc$biasCorrected_by <- method\n frc_new <- frc\n \n return(frc_new)\n}\n\n\n#################\n################# core functions for multi bias correction.\n\n#' @importFrom MASS fitdistr\n#' @importFrom stats ecdf quantile pgamma qgamma rgamma\n#' \n#' @references \n#' Bias correction methods come from \\code{biasCorrection} from \\code{dowscaleR}\n#' \n#' \\itemize{\n#' \n#' \\item Santander Meteorology Group (2015). downscaleR: Climate data manipulation and statistical downscaling. R\n#' package version 0.6-0. 
https://github.com/SantanderMetGroup/downscaleR/wiki\n#' \n#' \\item R.A.I. Wilcke, T. Mendlik and A. Gobiet (2013) Multi-variable error correction of regional climate models. Climatic Change, 120, 871-887\n#' \n#' \\item A. Amengual, V. Homar, R. Romero, S. Alonso, and C. Ramis (2012) A Statistical Adjustment of Regional Climate Model Outputs to Local Scales: Application to Platja de Palma, Spain. J. Clim., 25, 939-957\n#' \n#' \\item C. Piani, J. O. Haerter and E. Coppola (2009) Statistical bias correction for daily precipitation in regional climate models over Europe, Theoretical and Applied Climatology, 99, 187-192\n#' \n#' \\item O. Gutjahr and G. Heinemann (2013) Comparing precipitation bias correction methods for high-resolution regional climate simulations using COSMO-CLM, Theoretical and Applied Climatology, 114, 511-529\n#' }\ngetBiasFactor_core <- function(hindcast, obs, method , scaleType, preci, prThreshold, extrapolate){\n # If the variable is precipitation, some further process needs to be added.\n # The process is taken from downscaleR, to provide a more reasonable hindcast, used in the calibration.\n \n \n # check if frc, hindcast or obs are all na values\n if (!any(!is.na(obs)) | !any(!is.na(hindcast))) {\n warning('In this cell, hindcast or obs data is missing. 
No biasCorrection for this cell.')\n return(NA)\n } \n \n if (preci == TRUE) {\n preprocessHindcast_res <- preprocessHindcast(hindcast = hindcast, obs = obs, prThreshold = prThreshold)\n hindcast <- preprocessHindcast_res[[1]]\n minHindcastPreci <- preprocessHindcast_res[[2]]\n }\n \n # default is the simplest method in biascorrection, just do simple addition and subtraction.\n if (method == 'delta') {\n biasFactor <- getBiasFactor_core_delta(hindcast)\n } else if (method == 'scaling') {\n biasFactor <- getBiasFactor_core_scaling(hindcast, obs, scaleType)\n } else if (method == 'eqm') {\n # In this method, the value is bounded by the observation\n # Preci or not both have the same biasFactor\n if (preci == FALSE) {\n biasFactor <- getBiasFactor_core_eqm_nonPreci(hindcast, obs, extrapolate)\n } else {\n biasFactor <- getBiasFactor_core_eqm_preci(hindcast, obs, minHindcastPreci, extrapolate, prThreshold)\n }\n \n \n } else if (method == 'gqm') {\n if (preci == FALSE) stop ('gqm method only applys to precipitation, please set preci = T')\n biasFactor <- getBiasFactor_core_gqm(hindcast, obs, prThreshold, minHindcastPreci)\n }\n \n if (preci == TRUE) biasFactor$minHindcastPreci <- minHindcastPreci\n \n return(biasFactor)\n}\n\n\napplyBiasFactor_core <- function(frc, biasFactor, method, preci, prThreshold, scaleType,\n extrapolate, obs = NULL) {\n \n if (!any(!is.na(biasFactor))) {\n warning('In this cell, biasFactor is missing.No biasCorrection for this cell.')\n # here return NA or return the unprocessed frc, both are OK. 
But return NA is more\n # obvious for user.\n return(NA)\n }\n \n if (method == 'delta') {\n if (is.null(obs)) stop('This method needs obs input.')\n if (length(frc) != length(obs)) stop('This method needs frc data have the same length as obs data.')\n frc <- applyBiasFactor_core_delta(frc = frc, biasFactor = biasFactor, obs = obs)\n } else if (method == 'scaling') {\n frc <- applyBiasFactor_core_scaling(frc = frc, biasFactor = biasFactor, scaleType = scaleType)\n } else if (method == 'eqm') {\n if (is.null(obs)) stop('This method needs obs input.')\n if (preci == FALSE) {\n frc <- applyBiasFactor_core_eqm_nonPreci(frc = frc, biasFactor = biasFactor, extrapolate = extrapolate, \n obs = obs)\n } else {\n frc <- applyBiasFactor_core_eqm_preci(frc = frc, biasFactor = biasFactor, extrapolate = extrapolate, \n prThreshold = prThreshold, obs = obs)\n }\n } else if (method == 'gqm') {\n frc <- applyBiasFactor_core_gqm(frc = frc, biasFactor = biasFactor)\n }\n \n return(frc)\n}\n\n\ngetBiasFactor_core_delta <- function(hindcast) {\n biasFactor <- list()\n biasFactor$hindcastMean <- mean(hindcast, na.rm = TRUE)\n return(biasFactor)\n}\napplyBiasFactor_core_delta <- function(frc, biasFactor, obs) {\n hindcastMean <- biasFactor$hindcastMean\n frcMean <- mean(frc, na.rm = TRUE)\n return(obs - hindcastMean + frcMean)\n}\n\ngetBiasFactor_core_scaling <- function(hindcast, obs, scaleType) {\n biasFactor <- list()\n \n hindcastMean <- mean(hindcast, na.rm = TRUE)\n obsMean <- mean(obs, na.rm = TRUE)\n \n if (scaleType == 'multi') {\n biasFactor$scale <- obsMean / hindcastMean\n \n } else if (scaleType == 'add') {\n biasFactor$scale <- obsMean - hindcastMean\n }\n \n return(biasFactor)\n}\n\napplyBiasFactor_core_scaling <- function(frc, biasFactor, scaleType) {\n \n if (scaleType == 'multi') {\n frc <- frc * biasFactor$scale\n \n } else if (scaleType == 'add') {\n frc <- frc + biasFactor$scale\n }\n return(frc)\n}\n\ngetBiasFactor_core_eqm_nonPreci <- function(hindcast, obs, 
extrapolate) {\n \n biasFactor <- list()\n biasFactor$ecdfHindcast <- ecdf(hindcast)\n \n if (extrapolate == 'constant') {\n biasFactor$maxHindcast <- max(hindcast, na.rm = TRUE)\n biasFactor$minHindcast <- min(hindcast, na.rm = TRUE)\n biasFactor$higherIndex_dif <- biasFactor$maxHindcast - max(obs, na.rm = TRUE)\n biasFactor$lowerIndex_dif <- biasFactor$minHindcast - min(obs, na.rm = TRUE)\n }\n \n return(biasFactor)\n}\n\ngetBiasFactor_core_eqm_preci <- function(hindcast, obs, minHindcastPreci, extrapolate,\n prThreshold) {\n \n biasFactor <- list()\n biasFactor$ecdfHindcast <- ecdf(hindcast[hindcast > minHindcastPreci])\n \n if (extrapolate == 'constant') {\n biasFactor$maxHindcast <- max(hindcast, na.rm = TRUE)\n biasFactor$minHindcast <- min(hindcast, na.rm = TRUE)\n biasFactor$higherIndex_dif <- biasFactor$maxHindcast - max(obs, na.rm = TRUE)\n biasFactor$lowerIndex_dif <- biasFactor$minHindcast - min(obs, nna.rm = TRUE)\n }\n biasFactor$availableHindcastLength <- length(which(hindcast > minHindcastPreci))\n \n # drizzle parameter 1\n biasFactor$drizzleP1 <- min(hindcast[hindcast > minHindcastPreci], na.rm = TRUE)\n # biasFactor$prThreshold <- prThreshold\n return(biasFactor)\n}\n\napplyBiasFactor_core_eqm_nonPreci <- function(frc, biasFactor, extrapolate, obs) {\n ecdfHindcast <- biasFactor$ecdfHindcast\n \n if (extrapolate == 'constant') {\n higherIndex <- which(frc > biasFactor$maxHindcast)\n lowerIndex <- which(frc < biasFactor$minHindcast)\n \n extrapolateIndex <- c(higherIndex, lowerIndex)\n non_extrapolateIndex <- setdiff(1:length(frc), extrapolateIndex)\n \n # In case extrapolateIndex is of length zero, than extrapolate cannot be used afterwards\n # So use setdiff(1:length(sim), extrapolateIndex), if extrapolateIndex == 0, than it will\n # return 1:length(sim)\n \n if (length(higherIndex) > 0) {\n \n frc[higherIndex] <- frc[higherIndex] - biasFactor$higherIndex_dif\n }\n \n if (length(lowerIndex) > 0) {\n \n frc[lowerIndex] <- frc[lowerIndex] - 
biasFactor$lowerIndex_dif\n }\n \n frc[non_extrapolateIndex] <- quantile(obs, probs = ecdfHindcast(frc[non_extrapolateIndex]), \n na.rm = TRUE, type = 4)\n } else {\n frc <- quantile(obs, probs = ecdfHindcast(frc), na.rm = TRUE, type = 4)\n }\n return(frc)\n}\n\n#' @importFrom stats quantile\napplyBiasFactor_core_eqm_preci <- function(frc, biasFactor, extrapolate, prThreshold, obs) {\n \n # Most of time this condition seems useless because minHindcastPreci comes from hindcast, so there will be\n # always hindcast > minHindcastPreci exists.\n # Unless one condition that minHindcastPreci is the max in the hindcast, than on hindcast > minHindcastPreci\n if (biasFactor$availableHindcastLength > 0) {\n \n ecdfHindcast <- biasFactor$ecdfHindcast\n \n noRain <- which(frc <= biasFactor$minHindcastPreci & !is.na(frc))\n rain <- which(frc > biasFactor$minHindcastPreci & !is.na(frc))\n \n # drizzle is to see whether there are some precipitation between the min frc (over threshold) and \n # min hindcast (over threshold).\n drizzle <- which(frc > biasFactor$minHindcastPreci & frc <= biasFactor$drizzleP1 & !is.na(frc))\n \n if (length(rain) > 0) {\n ecdfFrc <- ecdf(frc[rain])\n \n if (extrapolate == 'constant') {\n \n # This higher and lower index mean the extrapolation part\n higherIndex <- which(frc[rain] > biasFactor$maxHindcast)\n lowerIndex <- which(frc[rain] < biasFactor$minHindcast)\n \n extrapolateIndex <- c(higherIndex, lowerIndex)\n non_extrapolateIndex <- setdiff(1:length(rain), extrapolateIndex)\n \n if (length(higherIndex) > 0) {\n frc[rain[higherIndex]] <- frc[higherIndex] - biasFactor$higherIndex_dif\n }\n \n if (length(lowerIndex) > 0) {\n frc[rain[lowerIndex]] <- frc[lowerIndex] - biasFactor$lowerIndex_dif\n }\n \n \n # Here the original function doesn't accout for the situation that extraploateIndex is 0\n # if it is 0, rain[-extraploateIndex] would be nothing\n \n # Above has been solved by using setdiff.\n frc[rain[non_extrapolateIndex]] <- 
quantile(obs[which(obs > prThreshold & !is.na(obs))], \n probs = ecdfHindcast(frc[rain[non_extrapolateIndex]]), \n na.rm = TRUE, type = 4)\n \n } else {\n \n frc[rain] <- quantile(obs[which(obs > prThreshold & !is.na(obs))], \n probs = ecdfHindcast(frc[rain]), na.rm = TRUE, type = 4)\n }\n }\n if (length(drizzle) > 0){\n \n # drizzle part is a seperate part. it use the ecdf of frc (larger than minHindcastPreci) to \n # biascorrect the original drizzle part \n frc[drizzle] <- quantile(frc[which(frc > biasFactor$drizzleP1 & !is.na(frc))], \n probs = ecdfFrc(frc[drizzle]), na.rm = TRUE, \n type = 4)\n }\n \n frc[noRain] <- 0\n \n } else {\n # in this condition minHindcastPreci is the max of hindcast, so all hindcast <= minHindcastPreci\n # And frc distribution is used then.\n noRain <- which(frc <= biasFactor$minHindcastPreci & !is.na(frc))\n rain <- which(frc > biasFactor$minHindcastPreci & !is.na(frc))\n \n if (length(rain) > 0) {\n ecdfFrc <- ecdf(frc[rain])\n frc[rain] <- quantile(obs[which(obs > prThreshold & !is.na(obs))], probs = ecdfFrc(frc[rain]), \n na.rm = TRUE, type = 4)\n }\n frc[noRain]<-0\n }\n return(frc)\n}\n\n#' @importFrom MASS fitdistr\ngetBiasFactor_core_gqm <- function(hindcast, obs, prThreshold, minHindcastPreci) {\n if (any(obs > prThreshold)) {\n biasFactor <- list()\n ind <- which(obs > prThreshold & !is.na(obs))\n obsGamma <- fitdistr(obs[ind],\"gamma\")\n biasFactor$obsShape <- obsGamma$estimate[1]\n biasFactor$obsRate <- obsGamma$estimate[2]\n \n ind <- which(hindcast > 0 & !is.na(hindcast))\n hindcastGamma <- fitdistr(hindcast[ind],\"gamma\")\n biasFactor$hindcastShape <- hindcastGamma$estimate[1]\n biasFactor$hindcastRate <- hindcastGamma$estimate[2]\n biasFactor$minHindcastPreci <- minHindcastPreci\n \n } else {\n warning('All the observations of this cell(station) are lower than the threshold, \n no biasFactor returned.')\n biasFactor <- NA\n }\n return(biasFactor)\n}\n\n#' @importFrom stats pgamma qgamma\napplyBiasFactor_core_gqm <- 
function(frc, biasFactor) {\n \n rain <- which(frc > biasFactor$minHindcastPreci & !is.na(frc))\n noRain <- which(frc <= biasFactor$minHindcastPreci & !is.na(frc))\n \n probF <- pgamma(frc[rain], biasFactor$hindcastShape, rate = biasFactor$hindcastRate)\n frc[rain] <- qgamma(probF, biasFactor$obsShape, rate = biasFactor$obsRate)\n frc[noRain] <- 0\n \n return(frc)\n}", + "created" : 1483876652911.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "1721961410", + "id" : "882400E4", + "lastKnownWriteTime" : 1488017859, + "last_content_update" : 1488017859911, + "path" : "~/GitHub/hyfo/R/multi-biasCorrect(generic).R", + "project_path" : "R/multi-biasCorrect(generic).R", + "properties" : { + }, + "relative_order" : 5, + "source_on_save" : false, + "source_window" : "", + "type" : "r_source" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/8E431305 b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/8E431305 new file mode 100644 index 0000000..0afb3cc --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/8E431305 @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "% Generated by roxygen2: do not edit by hand\n% Please edit documentation in R/case_anarbe.R\n\\name{collectData_csv_anarbe}\n\\alias{collectData_csv_anarbe}\n\\title{Collect data from csv for Anarbe case.}\n\\source{\nhttp://meteo.navarra.es/estaciones/mapadeestaciones.cfm\n}\n\\usage{\ncollectData_csv_anarbe(folderName, output = TRUE)\n}\n\\arguments{\n\\item{folderName}{A string showing the path of the folder holding different csv files.}\n\n\\item{output}{A boolean showing whether the output is given, default is T.}\n}\n\\value{\nThe collected data from different csv files.\n}\n\\description{\nCollect data from the gauging stations in spain, catchement Anarbe\n}\n\\examples{\n\n#use internal data as an example.\nfile <- system.file(\"extdata\", \"1999.csv\", package = \"hyfo\")\nfolder <- strsplit(file, '1999')[[1]][1]\na <- 
collectData_csv_anarbe(folder)\n\n# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n\n}\n\\references{\n\\itemize{\n\\item http://meteo.navarra.es/estaciones/mapadeestaciones.cfm\n\\item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for\nStatistical Computing, Vienna, Austria. URL https://www.R-project.org/.\n}\n}\n\n", + "created" : 1487955962873.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "1757137851", + "id" : "8E431305", + "lastKnownWriteTime" : 1488018205, + "last_content_update" : 1488018205, + "path" : "~/GitHub/hyfo/man/collectData_csv_anarbe.Rd", + "project_path" : "man/collectData_csv_anarbe.Rd", + "properties" : { + }, + "relative_order" : 23, + "source_on_save" : false, + "source_window" : "", + "type" : "r_doc" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/90EB6DDD b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/90EB6DDD new file mode 100644 index 0000000..7cf6b4a --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/90EB6DDD @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "% Generated by roxygen2: do not edit by hand\n% Please edit documentation in R/getEnsemble.R\n\\name{getFrcEnsem}\n\\alias{getFrcEnsem}\n\\title{Extract time series from forecasting data.}\n\\usage{\ngetFrcEnsem(dataset, cell = \"mean\", plot = \"norm\", output = \"data\",\n name = NULL, mv = 0, coord = NULL, ...)\n}\n\\arguments{\n\\item{dataset}{A list containing different information, should be the result of \\code{\\link{loadNcdf}}}\n\n\\item{cell}{A vector containing the locaton of the cell, e.g. c(2, 3), default is \"mean\", representing\nthe spatially averaged value. Check details for more information.}\n\n\\item{plot}{A string showing whether the plot will be shown, e.g., 'norm' means normal plot (without any process), \n'cum' means cummulative plot, default is 'norm'. 
For other words there will be no plot.}\n\n\\item{output}{A string showing which type of output you want. Default is \"data\", if \"ggplot\", the \ndata that can be directly plotted by ggplot2 will be returned, which is easier for you to make series\nplots afterwards. NOTE: If \\code{output = 'ggplot'}, the missing value in the data will\nbe replaced by \\code{mv}, if assigned, default mv is 0.}\n\n\\item{name}{If \\code{output = 'ggplot'}, name has to be assigned to your output, in order to differentiate\ndifferent outputs in the later multiplot using \\code{getEnsem_comb}.}\n\n\\item{mv}{A number showing representing the missing value. When calculating the cumulative value, \nmissing value will be replaced by mv, default is 0.}\n\n\\item{coord}{A coordinate of longitude and latitude. e.g. corrd = c(lon, lat). If coord is assigned,\ncell argument will no longer be used.}\n\n\\item{...}{\\code{title, x, y} showing the title and x and y axis of the plot. e.g. \\code{title = 'aaa'}}\n}\n\\value{\nA ensemble time series extracted from forecating data.\n}\n\\description{\ngetFrcEnsem extract timeseries from forecasting data, if forecasting data has a member session\nan ensemble time sereis will be returned, if forecasting data doesn't have a member session, a singe time\nseries will be returned.\n}\n\\details{\n\\code{cell} representing the location of the cell, NOTE: this location means the index of the cell,\nIT IS NOT THE LONGITUDE AND LATITUDE. e.g., \\code{cell = c(2, 3)}, the program will take the 2nd longitude\nand 3rd latitude, by the increasing order. Longitude comes first.\n\n\\code{name}\nAssuming you have two ggplot outputs, you want to plot them together. In this situation, you\nneed a name column to differentiate one ggplot output from the other. 
You can assigne this name\nby the argument directly, If name is not assigned and \\code{output = 'ggplot'} is selected, then\nthe system time will be selected as name column.\n}\n\\examples{\n\nfilePath <- system.file(\"extdata\", \"tnc.nc\", package = \"hyfo\")\n# Then if you don't know the variable name, you can use \\\\code{getNcdfVar} to get variable name\nvarname <- getNcdfVar(filePath)\nnc <- loadNcdf(filePath, varname)\na <- getFrcEnsem(nc)\n\n# If there is no member session in the dataset, a single time sereis will be extracted.\na1 <- getFrcEnsem(tgridData)\n\n\n# The default output is spatially averaged, if there are more than one cells in the dataset, \n# the mean value of the cells will be calculated. While if you are interested in special cell, \n# you can assign the cell value. You can also directly use longitude and latitude to extract \n# time series.\n\ngetSpatialMap(nc, 'mean')\na <- getFrcEnsem(nc, cell = c(6,2))\n\n# From the map, cell = c(6, 2) means lon = -1.4, lat = 43.2, so you can use corrd to locate\n# your research area and extract time series.\nb <- getFrcEnsem(nc, coord = c(-1.4, 43.2))\n\n\n# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n\n}\n\\references{\n\\itemize{\n\\item H. Wickham. ggplot2: elegant graphics for data analysis. Springer New York, 2009.\n\\item Hadley Wickham (2007). Reshaping Data with the reshape Package. Journal of Statistical Software,\n21(12), 1-20. URL http://www.jstatsoft.org/v21/i12/.\n\\item Santander Meteorology Group (2015). downscaleR: Climate data manipulation and\nstatistical downscaling. 
R package version 0.6-0.\nhttps://github.com/SantanderMetGroup/downscaleR/wiki\n}\n}\n\n", + "created" : 1487956260627.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "124886659", + "id" : "90EB6DDD", + "lastKnownWriteTime" : 1488018205, + "last_content_update" : 1488018205, + "path" : "~/GitHub/hyfo/man/getFrcEnsem.Rd", + "project_path" : "man/getFrcEnsem.Rd", + "properties" : { + }, + "relative_order" : 32, + "source_on_save" : false, + "source_window" : "", + "type" : "r_doc" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/92757319 b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/92757319 new file mode 100644 index 0000000..06e95c7 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/92757319 @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "% Generated by roxygen2: do not edit by hand\n% Please edit documentation in R/case_anarbe.R\n\\name{collectData_txt_anarbe}\n\\alias{collectData_txt_anarbe}\n\\title{collect data from different txt.}\n\\source{\nhttp://www4.gipuzkoa.net/oohh/web/esp/02.asp\n}\n\\usage{\ncollectData_txt_anarbe(folderName, output = TRUE,\n rangeWord = c(\"Ene \", -1, \"Total \", -6))\n}\n\\arguments{\n\\item{folderName}{A string showing the folder path.}\n\n\\item{output}{A boolean showing whether the result is given.}\n\n\\item{rangeWord}{A list containing the keyword and the shift. \ndefaut is set to be used in spain gauging station.}\n}\n\\value{\nThe collected data from different txt files.\n}\n\\description{\ncollect data from different txt.\n}\n\\examples{\n \n#use internal data as an example.\n\n\\dontrun{\nfile <- system.file(\"extdata\", \"1999.csv\", package = \"hyfo\")\nfolder <- strsplit(file, '1999')[[1]][1]\na <- collectData_txt_anarbe(folder)\n}\n\n\n# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n\n}\n\\references{\n\\itemize{\n\\item http://www4.gipuzkoa.net/oohh/web/esp/02.asp\n\\item R Core Team (2015). 
R: A language and environment for statistical computing. R Foundation for\nStatistical Computing, Vienna, Austria. URL https://www.R-project.org/.\n}\n}\n\n", + "created" : 1487956065089.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "3252535590", + "id" : "92757319", + "lastKnownWriteTime" : 1488018205, + "last_content_update" : 1488018205, + "path" : "~/GitHub/hyfo/man/collectData_txt_anarbe.Rd", + "project_path" : "man/collectData_txt_anarbe.Rd", + "properties" : { + }, + "relative_order" : 25, + "source_on_save" : false, + "source_window" : "", + "type" : "r_doc" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/9A428717 b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/9A428717 new file mode 100644 index 0000000..d65b23c --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/9A428717 @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "#' Resample your time series or ncdf files.\n#' \n#' Resameple your time series or ncdf files, more info pleae see details.\n#' \n#' \n#' @param data a hyfo grid data or a time series, with first column date, and second column value. The date column should\n#' follow the format in \\code{as.Date}, i.e. seperate with \"-\" or \"/\". Check details for more information.\n#' @param method A string showing whether you want to change a daily data to monthly data or monthly\n#' data to daily data.e.g. \"mon2day\" and \"day2mon\".\n#' @details \n#' Note, when you want to change daily data to monthly data, a new date column will be generated,\n#' usually the date column will be the middle date of each month, 15th, or 16th. However, if your \n#' time series doesn't start from the beginning of a month or ends to the end of a month, e.g. \n#' from 1999-3-14 to 2008-2-2, the first and last generated date could be wrong. Not only the date, but also the data, because you are \n#' not calculating based on a intact month. 
\n#' \n#' It is a generic function, if in your case you need to debug, please see \\code{?debug()} \n#' for how to debug S4 method.\n#' \n#' @return converted time series.\n#' @examples\n#' # Daily to monthly\n#' data(testdl)\n#' TS <- testdl[[2]] # Get daily data\n#' str(TS)\n#' TS_new <- resample(TS, method = 'day2mon')\n#' \n#' # Monthly to daily\n#' TS <- data.frame(Date = seq(as.Date('1999-9-15'), length = 30, by = '1 month'), \n#' runif(30, 3, 10))\n#' TS_new <- resample(TS, method = 'mon2day')\n#' \n#' #' # First load ncdf file.\n#' filePath <- system.file(\"extdata\", \"tnc.nc\", package = \"hyfo\")\n#' varname <- getNcdfVar(filePath) \n#' nc <- loadNcdf(filePath, varname)\n#' \n#' nc_new <- resample(nc, 'day2mon')\n#' \n#' \n#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n#' \n#' @export\n#' @importFrom stats aggregate\n#' @references \n#' \n#' \\itemize{\n#' \\item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for\n#' Statistical Computing, Vienna, Austria. 
URL https://www.R-project.org/.\n#' }\n#' \nsetGeneric('resample', function(data, method) {\n standardGeneric('resample')\n})\n\n\n#' @rdname resample\nsetMethod('resample', signature('data.frame'),\n function(data, method) {\n result <- resample.TS(data, method)\n return(result)\n })\n\n#' @rdname resample\nsetMethod('resample', signature('list'),\n function(data, method) {\n result <- resample.list(data, method)\n return(result)\n })\n\n\n\n#' @importFrom stats aggregate\n#' @importFrom data.table rbindlist\nresample.TS <- function(TS, method) {\n if (length(TS) != 2) {\n stop('Time series not correct, should be two columns, Date and value.')\n } else if (!grepl('-|/', TS[1, 1])) {\n stop('First column is not date or Wrong Date formate, check the format in ?as.Date{base} \n and use as.Date to convert.')\n } \n \n \n if (method == 'mon2day') {\n \n data <- apply(TS, MARGIN = 1 , FUN = mon2day)\n \n output <- rbindlist(data)\n } else if (method == 'day2mon') {\n Date <- as.Date(TS[, 1])\n year <- format(Date, format = '%Y')\n mon <- format(Date, format = '%m')\n \n data <- aggregate(TS, by = list(mon, year), FUN = mean, na.rm = TRUE)[, 3:4]\n rownames(data) <- 1:dim(data)[1]\n output <- data\n } else {\n stop('method is not correct, check method argument.')\n }\n \n return (output)\n}\n\n#' @importFrom stats aggregate\nresample.list <- function(hyfo, method) {\n checkHyfo(hyfo)\n hyfoData <- hyfo$Data\n Date <- as.POSIXlt(hyfo$Dates$start)\n year <- Date$year + 1900\n mon <- Date$mon + 1\n # hyfoDim <- attributes(hyfoData)$dimensions\n # resample focuses on time dimension. 
No matter whether the member dimension exists.\n timeIndex <- match('time', attributes(hyfoData)$dimensions)\n dimArray <- 1:length(attributes(hyfoData)$dimensions)\n \n if (method == 'day2mon') {\n hyfoData <- apply(hyfoData, MARGIN = dimArray[-timeIndex], \n function(x) aggregate(x, by = list(mon, year), FUN = mean, na.rm = TRUE)[, 3])\n Date <- aggregate(Date, by = list(mon, year), FUN = mean, na.rm = TRUE)[, 3]\n } else if (method == 'mon2day') {\n message('Under development.')\n }\n \n hyfo$Dates$start <- Date\n hyfo$Data <- hyfoData\n return(hyfo)\n}\n\n\n\n\n#' @importFrom utils tail\n#' @references \n#' \n#' \\itemize{\n#' \\item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for\n#' Statistical Computing, Vienna, Austria. URL https://www.R-project.org/.\n#' }\n#' \nmon2day <- function(monData) {\n Date <- as.Date(monData[1])\n data <- monData[2]\n \n DateY <- format(Date, format = '%Y')\n DateM <- format(Date, format = '%m')\n DateL <- seq(Date, length = 2, by = '1 months')[2] - Date\n \n DateD <- 1:DateL\n \n start <- as.Date(paste(DateY, DateM, DateD[1], sep = '-'))\n end <- as.Date(paste(DateY, DateM, tail(DateD, 1), sep = '-'))\n \n Date <- seq(start, end, by = '1 day')\n \n dailyData <- data.frame(Date = Date, value = rep(data, DateL))\n \n return(dailyData)\n}", + "created" : 1483876700794.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "3646914081", + "id" : "9A428717", + "lastKnownWriteTime" : 1488018115, + "last_content_update" : 1488018115688, + "path" : "~/GitHub/hyfo/R/resample(generic).R", + "project_path" : "R/resample(generic).R", + "properties" : { + }, + "relative_order" : 6, + "source_on_save" : false, + "source_window" : "", + "type" : "r_source" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/9CAB49AF b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/9CAB49AF new file mode 100644 index 0000000..f23fe99 --- /dev/null +++ 
b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/9CAB49AF @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "% Generated by roxygen2: do not edit by hand\n% Please edit documentation in R/analyzeTS.R\n\\name{plotTS}\n\\alias{plotTS}\n\\title{plot time series, with marks on missing value.}\n\\usage{\nplotTS(..., type = \"line\", output = \"data\", plot = \"norm\", name = NULL,\n showNA = TRUE, x = NULL, y = NULL, title = NULL, list = NULL)\n}\n\\arguments{\n\\item{...}{input time series.}\n\n\\item{type}{A string representing the type of the time series, e.g. 'line' or 'bar'.}\n\n\\item{output}{A string showing which type of output you want. Default is \"data\", if \"ggplot\", the \ndata that can be directly plotted by ggplot2 will be returned, which is easier for you to make series\nplots afterwards.}\n\n\\item{plot}{representing the plot type, there are two types, \"norm\" and \"cum\", \"norm\" gives an normal\nplot, and \"cum\" gives a cumulative plot. Default is \"norm\".}\n\n\\item{name}{If \\code{output = 'ggplot'}, name has to be assigned to your output, in order to differentiate\ndifferent outputs in the later multiplot using \\code{plotTS_comb}.}\n\n\\item{showNA}{A boolean representing whether the NA values should be marked, default is TRUE.}\n\n\\item{x}{label for x axis.}\n\n\\item{y}{label for y axis.}\n\n\\item{title}{plot title.}\n\n\\item{list}{If your input is a list of time series, then use \\code{list = your time sereis list}}\n}\n\\value{\nA plot of the input time series.\n}\n\\description{\nplot time series, with marks on missing value.\n}\n\\details{\nIf your input has more than one time series, the program will only plot the common period of \ndifferent time series.\n}\n\\examples{\nplotTS(testdl[[1]])\nplotTS(testdl[[1]], x = 'xxx', y = 'yyy', title = 'aaa')\n\n# If input is a datalist\nplotTS(list = testdl)\n\n# Or if you want to input time series one by one\n# If plot = 'cum' then cumulative curve will be plotted.\nplotTS(testdl[[1]], 
testdl[[2]], plot = 'cum')\n\n# You can also directly plot multicolumn dataframe\ndataframe <- list2Dataframe(extractPeriod(testdl, commonPeriod = TRUE))\nplotTS(dataframe, plot = 'cum')\n\n# Sometimes you may want to process the dataframe and compare with the original one\ndataframe1 <- dataframe\ndataframe1[, 2:4] <- dataframe1[, 2:4] + 3\nplotTS(dataframe, dataframe1, plot = 'cum')\n# But note, if your input is a multi column dataframe, it's better to plot one using plotTS,\n# and compare them using plotTS_comb. If all data are in one plot, there might be too messy.\n\n\n# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n\n}\n\\references{\n\\itemize{\n\\item H. Wickham. ggplot2: elegant graphics for data analysis. Springer New York, 2009.\n}\n}\n\n", + "created" : 1488014892948.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "921047716", + "id" : "9CAB49AF", + "lastKnownWriteTime" : 1488018205, + "last_content_update" : 1488018205, + "path" : "~/GitHub/hyfo/man/plotTS.Rd", + "project_path" : "man/plotTS.Rd", + "properties" : { + }, + "relative_order" : 44, + "source_on_save" : false, + "source_window" : "", + "type" : "r_doc" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/9DAD3561 b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/9DAD3561 new file mode 100644 index 0000000..0129592 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/9DAD3561 @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "% Generated by roxygen2: do not edit by hand\n% Please edit documentation in R/multi-biasCorrect(generic).R\n\\docType{methods}\n\\name{getBiasFactor}\n\\alias{getBiasFactor}\n\\alias{getBiasFactor,data.frame,data.frame-method}\n\\alias{getBiasFactor,list,list-method}\n\\title{Get bias factor for multi/operational/real time bias correction.}\n\\usage{\ngetBiasFactor(hindcast, obs, method = \"scaling\", scaleType = \"multi\",\n preci = FALSE, prThreshold = 0, extrapolate = 
\"no\")\n\n\\S4method{getBiasFactor}{data.frame,data.frame}(hindcast, obs,\n method = \"scaling\", scaleType = \"multi\", preci = FALSE,\n prThreshold = 0, extrapolate = \"no\")\n\n\\S4method{getBiasFactor}{list,list}(hindcast, obs, method = \"scaling\",\n scaleType = \"multi\", preci = FALSE, prThreshold = 0,\n extrapolate = \"no\")\n}\n\\arguments{\n\\item{hindcast}{a hyfo grid data output or a dataframe(time series) consists of Date column and one or more value columns, \nrepresenting the hindcast data. This data will be used in the calibration of the forecast, so it's better to have the same date period as\nobservation data. Check details for more information.}\n\n\\item{obs}{a hyfo grid data output or a dataframe (time series) consists of Date column and one or more value columns, \nrepresenting the observation data.}\n\n\\item{method}{bias correct method, including 'delta', 'scaling'...,default method is 'scaling'.}\n\n\\item{scaleType}{only when the method \"scaling\" is chosen, scaleType will be available. Two different types\nof scaling method, 'add' and 'multi', which means additive and multiplicative scaling method, default is 'multi'. More info check \ndetails.}\n\n\\item{preci}{If the precipitation is biascorrected, then you have to assign \\code{preci = TRUE}. Since for\nprecipitation, some biascorrect methods may not apply to, or some methods are specially for precipitation. \nDefault is FALSE, refer to details.}\n\n\\item{prThreshold}{The minimum value that is considered as a non-zero precipitation. Default to 1 (assuming mm).}\n\n\\item{extrapolate}{When use 'eqm' method, and 'no' is set, modified frc is bounded by the range of obs.\nIf 'constant' is set, modified frc is not bounded by the range of obs. Default is 'no'.}\n}\n\\description{\nWhen you do multi/operational/real time bias correction. It's too expensive\nto input hindcast and obs every time. 
Especially when you have a long period of hindcast\nand obs, but only a short period of frc, it's too unecessary to read and compute hindcast\nand obs everytime. Therefore, biasFactor is designed. Using \\code{getBiasFactor}, you can\nget the biasFactor with hindcast and observation, then you can use \\code{applyBiasFactor} to \napply the biasFactor to different forecasts.\n}\n\\details{\nInformation about the method and how biasCorrect works can be found in \\code{\\link{biasCorrect}}\n\n\\strong{why use biasFactor}\n\nAs for forecasting, for daily data, there is usually no need to have\ndifferent bias factor every different day. You can calculate one bisa factor using a long\nperiod of hindcast and obs, and apply that factor to different frc.\n\nFor example,\n\nYou have 10 years of hindcast and observation. you want to do bias correction for some \nforecasting product, e.g. system 4. For system 4, each month, you will get a new forecast\nabout the future 6 months. So if you want to do the real time bias correction, you have to\ntake the 10 years of hindcast and observation data with you, and run \\code{biasCorrect} every\ntime you get a new forecast. That's too expensive.\n\nFor some practical use in forecasting, there isn't a so high demand for accuracy. E.g.,\nMaybe for February and March, you can use the same biasFactor, no need to do the computation \nagain. 
\n \nIt is a generic function, if in your case you need to debug, please see \\code{?debug()} \nfor how to debug S4 method.\n}\n\\examples{\n\n######## hyfo grid file biascorrection\n########\n\n# If your input is obtained by \\\\code{loadNcdf}, you can also directly biascorrect\n# the file.\n\n# First load ncdf file.\nfilePath <- system.file(\"extdata\", \"tnc.nc\", package = \"hyfo\")\nvarname <- getNcdfVar(filePath) \nnc <- loadNcdf(filePath, varname)\n\ndata(tgridData)\n# Since the example data, has some NA values, the process will include some warning #message, \n# which can be ignored in this case.\n\n\n\n# Then we will use nc data as forecasting data, and use itself as hindcast data,\n# use tgridData as observation.\n\nbiasFactor <- getBiasFactor(nc, tgridData)\nnewFrc <- applyBiasFactor(nc, biasFactor)\n \nbiasFactor <- getBiasFactor(nc, tgridData, method = 'eqm', extrapolate = 'constant',\npreci = TRUE)\n# This method needs obs input.\nnewFrc <- applyBiasFactor(nc, biasFactor, obs = tgridData)\n\nbiasFactor <- getBiasFactor(nc, tgridData, method = 'gqm', preci = TRUE)\nnewFrc <- applyBiasFactor(nc, biasFactor) \n\n\n######## Time series biascorrection\n########\n\n# Use the time series from testdl as an example, we take frc, hindcast and obs from testdl.\ndata(testdl)\n\n# common period has to be extracted in order to better train the forecast.\n\ndatalist <- extractPeriod(testdl, startDate = '1994-1-1', endDate = '1995-10-1')\n\nfrc <- datalist[[1]]\nhindcast <- datalist[[2]]\nobs <- datalist[[3]]\n\n\n# The data used here is just for example, so there could be negative data.\n\n# default method is scaling\nbiasFactor <- getBiasFactor(hindcast, obs)\nfrc_new <- applyBiasFactor(frc, biasFactor)\n\n# for precipitation data, extra process needs to be executed, so you have to tell\n# the program to it is a precipitation data.\n\nbiasFactor <- getBiasFactor(hindcast, obs, preci = TRUE)\nfrc_new1 <- applyBiasFactor(frc, biasFactor)\n\n# You can use other 
methods to biascorrect, e.g. delta method. \nbiasFactor <- getBiasFactor(hindcast, obs, method = 'delta')\n# delta method needs obs input.\nfrc_new2 <- applyBiasFactor(frc, biasFactor, obs = obs)\n\n# \nbiasFactor <- getBiasFactor(hindcast, obs, method = 'eqm', preci = TRUE)\n# eqm needs obs input\nfrc_new3 <- applyBiasFactor(frc, biasFactor, obs = obs)\n\nbiasFactor <- getBiasFactor(hindcast, obs, method = 'gqm', preci = TRUE)\nfrc_new4 <- applyBiasFactor(frc, biasFactor)\n\nplotTS(obs, frc, frc_new, frc_new1, frc_new2, frc_new3, frc_new4, plot = 'cum')\n\n# You can also give name to this input list.\nTSlist <- list(obs, frc, frc_new, frc_new1, frc_new2, frc_new3, frc_new4)\nnames(TSlist) <- c('obs', 'frc', 'delta', 'delta_preci', 'scale', 'eqm', 'gqm')\nplotTS(list = TSlist, plot = 'cum')\n\n\n# If the forecasts you extracted only has incontinuous data for certain months and years, e.g.,\n# for seasonal forecasting, forecasts only provide 3-6 months data, so the case can be \n# for example Dec, Jan and Feb of every year from year 1999-2005.\n# In such case, you need to extract certain months and years from observed time series.\n# extractPeriod() can be then used.\n \n \n\n\n\n# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n\n\n}\n\\author{\nYuanchao Xu \\email{xuyuanchao37@gmail.com }\n}\n\\references{\nBias correction methods come from \\code{biasCorrection} from \\code{dowscaleR}\n\n\\itemize{\n\n\\item Santander Meteorology Group (2015). downscaleR: Climate data manipulation and statistical downscaling. R\npackage version 0.6-0. https://github.com/SantanderMetGroup/downscaleR/wiki\n\n\\item R.A.I. Wilcke, T. Mendlik and A. Gobiet (2013) Multi-variable error correction of regional climate models. Climatic Change, 120, 871-887\n\n\\item A. Amengual, V. Homar, R. Romero, S. Alonso, and C. Ramis (2012) A Statistical Adjustment of Regional Climate Model Outputs to Local Scales: Application to Platja de Palma, Spain. J. 
Clim., 25, 939-957\n\n\\item C. Piani, J. O. Haerter and E. Coppola (2009) Statistical bias correction for daily precipitation in regional climate models over Europe, Theoretical and Applied Climatology, 99, 187-192\n\n\\item O. Gutjahr and G. Heinemann (2013) Comparing precipitation bias correction methods for high-resolution regional climate simulations using COSMO-CLM, Theoretical and Applied Climatology, 114, 511-529\n}\n}\n\\seealso{\n\\code{\\link{biasCorrect}} for method used in bias correction.\n\\code{\\link{applyBiasFactor}}, for the second part.\n}\n\n", + "created" : 1487956242592.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "670227453", + "id" : "9DAD3561", + "lastKnownWriteTime" : 1488018205, + "last_content_update" : 1488018205, + "path" : "~/GitHub/hyfo/man/getBiasFactor.Rd", + "project_path" : "man/getBiasFactor.Rd", + "properties" : { + }, + "relative_order" : 30, + "source_on_save" : false, + "source_window" : "", + "type" : "r_doc" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/A0BF5A09 b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/A0BF5A09 new file mode 100644 index 0000000..4694180 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/A0BF5A09 @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "% Generated by roxygen2: do not edit by hand\n% Please edit documentation in R/ncdf.R\n\\name{getNcdfVar}\n\\alias{getNcdfVar}\n\\title{Get variable name of the NetCDF file.}\n\\usage{\ngetNcdfVar(filePath)\n}\n\\arguments{\n\\item{filePath}{A path pointing to the netCDF file.}\n}\n\\value{\nThe names of the varialbes in the file.\n}\n\\description{\nGet variable name in the NetCDF file. 
After knowning the name, you can use \\code{loadNcdf} to load\nthe target variable.\n}\n\\examples{\n# First open the test NETcDF file.\nfilePath <- system.file(\"extdata\", \"tnc.nc\", package = \"hyfo\")\n\n# Then if you don't know the variable name, you can use \\\\code{getNcdfVar} to get variable name\nvarname <- getNcdfVar(filePath)\n\n# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n\n}\n\\references{\n\\itemize{\n\\item David Pierce (2015). ncdf4: Interface to Unidata netCDF (Version 4 or\nEarlier) Format Data Files. R package version 1.14.1.\nhttps://CRAN.R-project.org/package=ncdf4\n}\n}\n\n", + "created" : 1487956333313.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "2643611823", + "id" : "A0BF5A09", + "lastKnownWriteTime" : 1488018205, + "last_content_update" : 1488018205, + "path" : "~/GitHub/hyfo/man/getNcdfVar.Rd", + "project_path" : "man/getNcdfVar.Rd", + "properties" : { + }, + "relative_order" : 36, + "source_on_save" : false, + "source_window" : "", + "type" : "r_doc" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/A2A68A80 b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/A2A68A80 new file mode 100644 index 0000000..ed24e01 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/A2A68A80 @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "#' Get ensemble forecast from historical data.\n#' \n#' getHisEnsem use historical data as the forecasting input time series.\n#' \n#' @param TS A time series dataframe, with first column Date, and second column value.\n#' @param example A vector containing two strings showing the start and end date, which represent the \n#' forecasting period. Check details for more information.\n#'\n#' the program will extract every possible period in TS you provided to generate the ensemble. Check details for \n#' more information.\n#' @param interval A number representing the interval of each ensemble member. 
NOTE: \"interval\" takes\n#' 365 as a year, and 30 as a month, regardless of leap year and months with 31 days. So if you want the interval \n#' to be 2 years, set \\code{interval = 730}, which equals 2 * 365 ; if two months, set \\code{interval = 60}; \n#' 2 days, \\code{interval = 2}, for other numbers that cannot be divided by 365 or 30 without remainder, it will treat the \n#' number as days.By defualt interval is set to be 365, a year.\n#' @param buffer A number showing how many days are used as buffer period for models. Check details for more\n#' information.\n#' \n#' @param plot A string showing whether the plot will be shown, e.g., 'norm' means normal plot (without any process), \n#' 'cum' means cummulative plot, default is 'norm'. For other words there will be no plot.\n#' @param output A string showing which type of output you want. Default is \"data\", if \"ggplot\", the \n#' data that can be directly plotted by ggplot2 will be returned, which is easier for you to make series\n#' plots afterwards. NOTE: If \\code{output = 'ggplot'}, the missing value in the data will\n#' be replaced by \\code{mv}, if assigned, default mv is 0.\n#' \n#' @param name If \\code{output = 'ggplot'}, name has to be assigned to your output, in order to differentiate\n#' different outputs in the later multiplot using \\code{getEnsem_comb}.\n#' \n#' @param mv A number showing representing the missing value. When calculating the cumulative value, \n#' missing value will be replaced by mv, default is 0.\n#' @param ... \\code{title, x, y} showing the title and x and y axis of the plot. e.g. \\code{title = 'aaa'}\n#' \n#' @details \n#' \n#' \\code{example} E.g., if you have a time series from 2000 to 2010. Assuming you are in 2003,\n#' you want to forecast the period from 2003-2-1 to 2003-4-1. Then for each year in your input\n#' time series, every year from 1st Feb to 1st Apr will be extracted to generate the ensemble\n#' forecasts. 
In this case your input example should be \\code{example = c('2003-2-1', '2003-4-1')}\n#' \n#' \\code{interval} doesn't care about leap year and the months with 31 days, it will take 365 as a year, and 30 as a month.\n#' e.g., if the interval is from 1999-2-1 to 1999-3-1, you should just set interval to 30, although the real interval is 28\n#' days.\n#' \n#' \\code{example} and \\code{interval} controls how the ensemble will be generated. e.g. if the time series is from \n#' 1990-1-1 to 2001-1-1.\n#' \n#' if \\code{example = c('1992-3-1', '1994-1-1')} and \\code{interval = 1095}, note, 1095 = 365 * 3, so the program treat\n#' this as 3 years.\n#' \n#' Then you are supposed to get the ensemble consisting of following part:\n#' \n#' 1. 1992-3-1 to 1994-1-1 first one is the example, and it's NOT start from 1990-3-1.\n#' 2. 1995-3-1 to 1997-1-1 second one starts from 1993, because \"interval\" is 3 years.\n#' 3. 1998-3-1 to 2000-1-1\n#' \n#' because the last one \"2000-3-1 to 2002-1-1\", 2002 exceeds the original TS range, so it will not be included.\n#' \n#' Sometimes, there are leap years and months with 31 days included in some ensemble part, in which case the length of the data will\n#' be different, e.g., 1999-1-1 to 1999-3-1 is 1 day less than 2000-1-1 to 2000-3-1. In this situation,\n#' the data will use example as a standard. If the example is 1999-1-1 to 1999-3-1, then the latter one\n#' will be changed to 2001-1-1 to 2000-2-29, which keeps the start Date and change the end Date.\n#' \n#' If the end date is so important that cannot be changed, try to solve this problem by resetting\n#' the example period, to make the event included in the example.\n#' \n#' Good set of example and interval can generate good ensemble.\n#' \n#' \\code{buffer}\n#' Sometimes the model needs to run for a few days to warm up, before the forecast. E.g., if a forecast starts at\n#' '1990-1-20', for some model like MIKE NAM model, the run needs to be started about 14 days. 
So the input timeseries\n#' should start from '1990-1-6'.\n#' \n#' Buffer is mainly used for the model hotstart. Sometimes the hot start file cannot contain all the parameters needed,\n#' only some important parameters. In this case, the model needs to run for some time, to make other parameters ready\n#' for the simulation.\n#' \n#' \n#' \\code{name}\n#' Assuming you have two ggplot outputs, you want to plot them together. In this situation, you\n#' need a name column to differentiate one ggplot output from the other. You can assigne this name\n#' by the argument directly, name has to be assigned if \\code{output = 'ggplot'} is selected,\n#' @return A ensemble time series using historical data as forecast.\n#' \n#' @examples\n#' \n#' data(testdl)\n#' \n#' a <- testdl[[1]]\n#' \n#' # Choose example from \"1994-2-4\" to \"1996-1-4\"\n#' b <- getHisEnsem(a, example = c('1994-2-4', '1996-1-4'))\n#' \n#' # Default interval is one year, can be set to other values, check help for information.\n#' \n#' # Take 7 months as interval\n#' b <- getHisEnsem(a, example = c('1994-2-4', '1996-1-4'), interval = 210, plot = 'cum') \n#' # Take 30 days as buffer\n#' b <- getHisEnsem(a, example = c('1994-2-4', '1996-1-4'), interval = 210, buffer = 30)\n#' \n#' \n#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n#' \n#' \n#' @importFrom reshape2 melt \n#' @importFrom grDevices rainbow\n#' @import ggplot2\n#' @references \n#' \n#' \\itemize{\n#' \\item Hadley Wickham (2007). Reshaping Data with the reshape Package. Journal of Statistical Software,\n#' 21(12), 1-20. URL http://www.jstatsoft.org/v21/i12/.\n#' \\item H. Wickham. ggplot2: elegant graphics for data analysis. Springer New York, 2009.\n#' }\n#' \n#' \n#' @export\n\ngetHisEnsem <- function (TS, example, interval = 365, buffer = 0, plot = 'norm', output = 'data', \n name = NULL, mv = 0, ...) 
{\n if (!grepl('-|/', TS[1, 1])) {\n stop('First column is not date or Wrong Date formate, check the format in ?as.Date{base} \n and use as.Date to convert.')\n } else if (!grepl('-|/', example[1]) | !grepl('-|/', example[1])) {\n stop('Wrong date format in the example, check the format in ?as.Date{base} \n and use as.Date to convert.')\n } else {\n \n \n \n TS[, 1] <- as.Date(TS[, 1])\n example <- as.Date(example ,tz = '')\n exL <- example[2] - example[1]\n # Test if example is in the range of the TS\n a <- which(TS[, 1] == example[1] | TS[, 1] == example[2])\n if (length(a) < 2) stop('Example is out of the time series, reset example.')\n \n \n \n if (interval %% 365 == 0) {\n d <- interval / 365\n \n # Get sequence of start and end date.\n \n startDate <- rev(seq(from = example[1], to = min(TS[, 1]), by = paste(-d, 'years')))\n endDate <- seq(from = example[2], to = max(TS[, 1]), by = paste(d, 'years'))\n\n n <- length(startDate) + length(endDate) - 1 # example is counted twice, should be subtracted. 
\n \n # Generate full start date series.\n startDate <- seq(min(startDate), length = n, by = paste(d, 'years'))\n endDate <- startDate + exL\n \n } else if (interval %% 30) {\n d <- interval / 30\n \n # Get sequence of start and end date.\n \n startDate <- rev(seq(from = example[1], to = min(TS[, 1]), by = paste(-d, 'months')))\n endDate <- seq(from = example[2], to = max(TS[, 1]), by = paste(d, 'months'))\n \n n <- length(startDate) + length(endDate) - 1\n \n startDate <- seq(min(startDate), length = n, by = paste(d, 'months'))\n endDate <- startDate + exL\n \n } else {\n d <- interval\n \n # Get sequence of start and end date.\n \n startDate <- rev(seq(from = example[1], to = min(TS[, 1]), by = paste(-d, 'days')))\n endDate <- seq(from = example[2], to = max(TS[, 1]), by = paste(d, 'days'))\n \n n <- length(startDate) + length(endDate) - 1\n \n startDate <- seq(min(startDate), length = n, by = paste(d, 'days'))\n endDate <- startDate + exL\n }\n \n data <- mapply(FUN = function(x, y) extractPeriod_dataframe(dataframe = TS, startDate = x, endDate = y),\n x = startDate, y = endDate)\n \n data <- lapply(1:n, function(x) data.frame(data[, x]))\n \n if (buffer > 0) {\n bufferStart <- example[1] - buffer\n bufferEnd <- example[1] - 1\n bufferTS <- extractPeriod_dataframe(TS, bufferStart, bufferEnd)\n \n data <- lapply(data, function(x) rbind(bufferTS, x))\n \n } else if (buffer < 0) {\n stop ('Buffer should be positive, or reset example.')\n }\n \n \n data_output <- list2Dataframe(data)\n colnames(data_output) <- c('Date', as.character(startDate))\n \n # Rearrange dataframe to make example the first column.\n ind <- match(c('Date', as.character(example[1])), colnames(data_output))\n # when use cbind, to ensure the output is also a dataframe, one inside cbind should be dataframe\n # Even output is alread a dataframe, but when ind is a single number, then output[ind] will\n # not be a dataframe, but an array.\n data_output <- cbind(data.frame(data_output[ind]), 
data_output[-ind])\n ex_date <- seq(from = example[1] - buffer, to = example[2], by = 1)\n data_output$Date <- ex_date\n colnames(data_output)[2] <- 'Observation'\n \n meanV <- apply(data_output[, 2:ncol(data_output)], MARGIN = 1, FUN = mean, na.rm = TRUE)\n \n data_output <- cbind(data.frame(Date = data_output[, 1]), Mean = meanV, \n data_output[, 2:ncol(data_output)])\n \n data_ggplot <- melt(data_output, id.var = 'Date')\n NAIndex <- is.na(data_ggplot$value)\n data_ggplot$nav <- rep(0, nrow(data_ggplot))\n data_ggplot$nav[NAIndex] <- 1\n \n if (plot == 'norm') {\n data_ggplot$value[NAIndex] <- mv\n \n } else if (plot == 'cum') {\n data_output[is.na(data_output)] <- mv\n cum <- cbind(data.frame(Date = data_output$Date), cumsum(data_output[2:ncol(data_output)]))\n \n data_ggplot <- melt(cum, id.var = 'Date')\n } else {\n stop('plot can only be \"norm\" or \"cum\", do not assign other words')\n }\n \n #generate different colors \n colors = c('brown1', 'dodgerblue3', rainbow(n = length(unique(data_ggplot$variable)) - 2,\n start = 0.1))\n \n theme_set(theme_bw())\n mainLayer <- with(data_ggplot, {\n ggplot(data = data_ggplot) +\n aes(x = Date, y = value, color = variable, group = variable) +\n geom_line(size = 0.5) +\n geom_line(data = data_ggplot[data_ggplot$variable == 'Observation', ], size = 1.6) +\n geom_line(data = data_ggplot[data_ggplot$variable == 'Mean', ], size = 1.6) +\n geom_point(data = data_ggplot[NAIndex, ], size = 3, shape = 4, color = 'black') +\n scale_color_manual(values = colors) +\n labs(empty = NULL, ...) 
+#in order to pass \"...\", arguments shouldn't be empty.\n theme(axis.text.x = element_text(size = rel(1.8)),\n axis.text.y = element_text(size = rel(1.8)),\n axis.title.x = element_text(size = rel(1.8)),\n axis.title.y = element_text(size = rel(1.8)))\n })\n print(mainLayer)\n \n if (output == 'ggplot') {\n if (is.null(name)) stop('\"name\" argument not found, \n If you choose \"ggplot\" as output, please assign a name.')\n data_ggplot$name <- rep(name, nrow(data_ggplot)) \n data_ggplot$nav <- rep(0, nrow(data_ggplot))\n data_ggplot$nav[NAIndex] <- 1\n\n return(data_ggplot)\n } else {\n return(data_output)\n }\n }\n}\n\n\n\n\n\n\n#' Extract time series from forecasting data.\n#' \n#' getFrcEnsem extract timeseries from forecasting data, if forecasting data has a member session\n#' an ensemble time sereis will be returned, if forecasting data doesn't have a member session, a singe time\n#' series will be returned.\n#' \n#' @param dataset A list containing different information, should be the result of \\code{\\link{loadNcdf}}\n#' @param cell A vector containing the locaton of the cell, e.g. c(2, 3), default is \"mean\", representing\n#' the spatially averaged value. Check details for more information.\n#' @param plot A string showing whether the plot will be shown, e.g., 'norm' means normal plot (without any process), \n#' 'cum' means cummulative plot, default is 'norm'. For other words there will be no plot.\n#' @param output A string showing which type of output you want. Default is \"data\", if \"ggplot\", the \n#' data that can be directly plotted by ggplot2 will be returned, which is easier for you to make series\n#' plots afterwards. 
NOTE: If \\code{output = 'ggplot'}, the missing value in the data will\n#' be replaced by \\code{mv}, if assigned, default mv is 0.\n#' @param name If \\code{output = 'ggplot'}, name has to be assigned to your output, in order to differentiate\n#' different outputs in the later multiplot using \\code{getEnsem_comb}.\n#' @param mv A number showing representing the missing value. When calculating the cumulative value, \n#' missing value will be replaced by mv, default is 0.\n#' @param coord A coordinate of longitude and latitude. e.g. corrd = c(lon, lat). If coord is assigned,\n#' cell argument will no longer be used.\n#' @param ... \\code{title, x, y} showing the title and x and y axis of the plot. e.g. \\code{title = 'aaa'}\n#' \n#' @details \n#' \n#' \\code{cell} representing the location of the cell, NOTE: this location means the index of the cell,\n#' IT IS NOT THE LONGITUDE AND LATITUDE. e.g., \\code{cell = c(2, 3)}, the program will take the 2nd longitude\n#' and 3rd latitude, by the increasing order. Longitude comes first.\n#' \n#' \\code{name}\n#' Assuming you have two ggplot outputs, you want to plot them together. In this situation, you\n#' need a name column to differentiate one ggplot output from the other. 
You can assigne this name\n#' by the argument directly, If name is not assigned and \\code{output = 'ggplot'} is selected, then\n#' the system time will be selected as name column.\n#' \n#' @examples \n#' \n#' filePath <- system.file(\"extdata\", \"tnc.nc\", package = \"hyfo\")\n\n#' # Then if you don't know the variable name, you can use \\code{getNcdfVar} to get variable name\n#' varname <- getNcdfVar(filePath)\n#' nc <- loadNcdf(filePath, varname)\n#' a <- getFrcEnsem(nc)\n#' \n#' # If there is no member session in the dataset, a single time sereis will be extracted.\n#' a1 <- getFrcEnsem(tgridData)\n#' \n#' \n#' # The default output is spatially averaged, if there are more than one cells in the dataset, \n#' # the mean value of the cells will be calculated. While if you are interested in special cell, \n#' # you can assign the cell value. You can also directly use longitude and latitude to extract \n#' # time series.\n#' \n#' getSpatialMap(nc, 'mean')\n#' a <- getFrcEnsem(nc, cell = c(6,2))\n#' \n#' # From the map, cell = c(6, 2) means lon = -1.4, lat = 43.2, so you can use corrd to locate\n#' # your research area and extract time series.\n#' b <- getFrcEnsem(nc, coord = c(-1.4, 43.2))\n#' \n#' \n#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n#' \n#' @return A ensemble time series extracted from forecating data.\n#' \n#' @import ggplot2\n#' @importFrom reshape2 melt\n#' @references \n#' \n#' \\itemize{\n#' \\item H. Wickham. ggplot2: elegant graphics for data analysis. Springer New York, 2009.\n#' \\item Hadley Wickham (2007). Reshaping Data with the reshape Package. Journal of Statistical Software,\n#' 21(12), 1-20. URL http://www.jstatsoft.org/v21/i12/.\n#' \\item Santander Meteorology Group (2015). downscaleR: Climate data manipulation and\n#' statistical downscaling. 
R package version 0.6-0.\n#' https://github.com/SantanderMetGroup/downscaleR/wiki\n#' }\n#' \n#' \n#' @export\ngetFrcEnsem <- function(dataset, cell = 'mean', plot = 'norm', output = 'data', name = NULL,\n mv = 0, coord = NULL, ...) {\n # cell should be a vector showing the location, or mean representing the loacation averaged.\n \n checkHyfo(dataset)\n \n Date <- as.Date(dataset$Dates$start)\n data <- dataset$Data\n \n # Dimension needs to be arranged. Make sure first and second dimension is lat and lon.\n data <- adjustDim(data, ref = c('lon', 'lat', 'time'))\n \n if (!is.null(coord)) {\n cell <- coord2cell(coord, dataset$xyCoords$x, dataset$xyCoords$y)\n } \n \n \n if (!any(attributes(data)$dimensions == 'member')){\n message('There is no member part in the dataset, there will be only one column of value\n returned.')\n \n if (length(cell) == 2) {\n data_ensem <- data[cell[1], cell[2], ]\n \n } else if (cell == 'mean') {\n data_ensem <- apply(data, MARGIN = 3, FUN = mean, na.rm = TRUE)\n # colnames <- 1:ncol(data_ensem)\n \n } else {\n stop('Wrong cell input, check help for information.')\n }\n \n } else {\n \n if (length(cell) == 2) {\n data_ensem <- data[cell[1], cell[2], , ]\n meanV <- apply(data_ensem, MARGIN = 1, FUN = mean, na.rm = TRUE)\n data_ensem <- data.frame('Mean' = meanV, data_ensem) \n \n } else if (cell == 'mean') {\n data_ensem <- apply(data, MARGIN = c(3, 4), FUN = mean, na.rm = TRUE)\n # colnames <- 1:ncol(data_ensem)\n meanV <- apply(data_ensem, MARGIN = 1, FUN = mean, na.rm = TRUE)\n data_ensem <- data.frame('Mean' = meanV, data_ensem)\n \n } else {\n stop('Wrong cell input, check help for information.')\n }\n }\n\n \n data_output <- data.frame(Date, data_ensem)\n data_ggplot <- melt(data_output, id.var = 'Date')\n NAIndex <- is.na(data_ggplot$value)\n \n \n if (plot == 'norm') {\n data_ggplot$value[NAIndex] <- mv\n } else if (plot == 'cum') {\n data_output[is.na(data_output)] <- mv\n cum <- cbind(data.frame(Date = data_output$Date), 
cumsum(data_output[2:ncol(data_output)]))\n \n data_ggplot <- melt(cum, id.var = 'Date')\n \n }\n \n colors = c('brown1', rainbow(n = length(unique(data_ggplot$variable)) - 1,\n start = 0.1))\n \n theme_set(theme_bw())\n mainLayer <- with(data_ggplot, {\n ggplot(data = data_ggplot) +\n aes(x = Date, y = value, color = variable) +\n geom_line(size = 0.5) +\n geom_line(data = data_ggplot[data_ggplot$variable == 'Mean', ], size = 1.6, color = 'red') +\n geom_point(data = data_ggplot[NAIndex, ], size = 2, shape = 4, color = 'black') +\n scale_color_manual(values = colors) +\n theme(axis.text.x = element_text(size = rel(1.8)),\n axis.text.y = element_text(size = rel(1.8)),\n axis.title.x = element_text(size = rel(1.8)),\n axis.title.y = element_text(size = rel(1.8))) +\n labs(empty = NULL, ...)#in order to pass \"...\", arguments shouldn't be empty.\n \n })\n print(mainLayer)\n \n if (output == 'ggplot') {\n if (is.null(name)) stop('\"name\" argument not found, \n If you choose \"ggplot\" as output, please assign a name.')\n \n data_ggplot$name <- rep(name, nrow(data_ggplot)) \n data_ggplot$nav <- rep(0, nrow(data_ggplot))\n data_ggplot$nav[NAIndex] <- 1\n return(data_ggplot)\n } else {\n return(data_output)\n }\n}\n\n\n\n#' Combine ensembles together\n#' @param ... different ensembles generated by \\code{getHisEnsem(, output = 'ggplot')} \n#' or \\code{getFrcEnsem(, output = 'ggplot')}, see details.\n#' @param nrow A number showing the number of rows.\n#' @param list If input is a list containing different ggplot data, use \\code{list = inputlist}.\n#' @param legend A boolean representing whether you want the legend. 
Sometimes when you combine\n#' plots, there will be a lot of legends, if you don't like it, you can turn it off by setting\n#' \\code{legend = FALSE}, default is TRUE.\n#' @param x A string of x axis name.\n#' @param y A string of y axis name.\n#' @param title A string of the title.\n#' @param output A boolean, if chosen TRUE, the output will be given.\n#' @return A combined ensemble plot.\n#' @examples \n#' \n#' data(testdl)\n#' \n#' a <- testdl[[1]]\n#' \n#' # Choose example from \"1994-2-4\" to \"1996-1-4\"\n#' \n#' \n#' b1<- getHisEnsem(a, example = c('1995-2-4', '1996-1-4'), plot = 'cum', output = 'ggplot',\n#' name = 1)\n#' \n#' b2 <- getHisEnsem(a, example = c('1995-4-4', '1996-3-4'), plot = 'cum', output = 'ggplot',\n#' name = 2)\n#' \n#' getEnsem_comb(b1, b2)\n#' getEnsem_comb(list = list(b1, b2), nrow = 2)\n#' \n#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n#' \n#' \n#' @export\n#' @import ggplot2\n#' @importFrom data.table rbindlist\n#' @references \n#' \n#' \\itemize{\n#' \\item H. Wickham. ggplot2: elegant graphics for data analysis. Springer New York, 2009.\n#' \\item Santander Meteorology Group (2015). downscaleR: Climate data manipulation and\n#' statistical downscaling. 
R package version 0.6-0.\n#' https://github.com/SantanderMetGroup/downscaleR/wiki\n#' }\n#' \n#' \n#' \n\ngetEnsem_comb <- function(..., list = NULL, nrow = 1, legend = TRUE, x = '', y = '', title = '', \n output = FALSE) {\n \n if (!is.null(list)) {\n checkBind(list, 'rbind')\n data_ggplot <- rbindlist(list)\n } else {\n plots <- list(...)\n checkBind(plots, 'rbind')\n data_ggplot <- rbindlist(plots)\n } \n #data_ggplot$name <- factor(data_ggplot$name, levels = data_ggplot$name, ordered = TRUE)\n \n if (!class(data_ggplot)[1] == 'data.table') {\n warning('Your input is probably a list, but you forget to add \"list = \" before it.\n Try again, or check help for more information.')\n } else if (is.null(data_ggplot$name)) {\n stop('No \"Name\" column in the input data, check the arguments in getFreEnsem() or getHisEnsem(), if \n output = \"ggplot\" is assigned, more info please check ?getFreEnsem() or ?getHisEnsem().')\n }\n \n colors = c('brown1', 'dodgerblue3', rainbow(n = length(unique(data_ggplot$variable)) - 2,\n start = 0.1))\n \n theme_set(theme_bw())\n mainLayer <- with(data_ggplot, {\n ggplot(data = data_ggplot) +\n aes(x = Date, y = value, color = variable) +\n geom_line(size = 0.5) +\n geom_line(data = data_ggplot[data_ggplot$variable == 'Mean', ], size = 1.6) +\n geom_line(data = data_ggplot[data_ggplot$variable == 'Observation', ], size = 1.6) +\n geom_point(data = data_ggplot[data_ggplot$nav == 1, ], size = 2, shape = 4, color = 'black') +\n scale_color_manual(values = colors) +\n theme(axis.text.x = element_text(size = rel(1.8)),\n axis.text.y = element_text(size = rel(1.8)),\n axis.title.x = element_text(size = rel(1.8)),\n axis.title.y = element_text(size = rel(1.8))) +\n facet_wrap( ~ name, nrow = nrow) +\n labs(x = x, y = y, title = title)\n \n })\n if (legend == FALSE) {\n mainLayer <- mainLayer + \n theme(legend.position = 'none')\n# following ones are to add label, may be added in future.\n# geom_text(data = data_ggplot[data_ggplot$Date == 
'2003-12-10', ], aes(label = variable), hjust = 0.7, vjust = 1)\n# geom_text(data = data_ggplot[data_ggplot$variable == 'Mean', ], aes(label = variable), hjust = 0.7, vjust = 1)\n }\n \n \n print(mainLayer)\n \n if (output == TRUE) return(data_ggplot)\n \n}", + "created" : 1487522564089.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "3298987581", + "id" : "A2A68A80", + "lastKnownWriteTime" : 1488015925, + "last_content_update" : 1488015925019, + "path" : "~/GitHub/hyfo/R/getEnsemble.R", + "project_path" : "R/getEnsemble.R", + "properties" : { + }, + "relative_order" : 17, + "source_on_save" : false, + "source_window" : "", + "type" : "r_source" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/A879E0CC b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/A879E0CC new file mode 100644 index 0000000..3f29698 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/A879E0CC @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "#' Collect data from different csv files.\n#' \n#' @param folderName A string showing the path of the folder holding different csv files.\n#' @param fileType A string showing the file type, e.g. 
\"txt\", \"csv\", \"excel\".\n#' @param range A vector containing startRow, endRow, startColumn, endColumn, e.g., \n#' c(2,15,2,3)\n#' @param sheetIndex A number showing the sheetIndex in the excel file, if fileType is excel,\n#' sheetIndex has to be provided, default is 1.\n#' @return The collected data from different files in the folder.\n#' @examples \n#' \n#' #use internal data as an example.\n#' folder <- file.path(path.package(\"hyfo\"), 'extdata')\n#' # file may vary with different environment, it if doesn't work, use local way to get\n#' # folder path.\n#' \n#' a <- collectData(folder, fileType = 'csv', range = c(10, 20, 1,2))\n#' \n#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n#' \n#' @export\n#' @importFrom data.table rbindlist\ncollectData <- function(folderName, fileType = NULL, range = NULL, sheetIndex = 1){\n \n message('All the files in the folder should have the same format')\n \n if (is.null(fileType)) stop('Please enter fileType, \"txt\", \"csv\" or \"excel\".')\n \n if (length(range) > 4) {\n stop('\"range\" should be c(startRow, endRow, startCol, endCol)')\n }else if (is.null(range)) {\n stop('\"range\" can not be blank, e.g., range <- c(startRow, endRow, startCol, endCol).')\n }\n \n if (fileType == 'csv') {\n fileNames <- list.files(folderName, pattern = '*.csv', full.names = TRUE)\n if (length(fileNames) == 0) stop('No csv file in the folder.')\n \n data <- lapply(fileNames, readCsv, range = range)\n data <- rbindlist(data)\n \n } else if (fileType == 'txt') {\n fileNames <- list.files(folderName, pattern = '*.txt', full.names = TRUE)\n if (length(fileNames) == 0) {\n fileNames <- list.files(folderName, pattern = '*.TXT', full.names = TRUE)\n }\n if (length(fileNames) == 0) stop('No text file in the folder.')\n message('For txt file, only startRow and endRow will be considered.')\n data <- lapply(fileNames, readTxt, range = range)\n data <- unlist(data)\n \n# In order not to introduce too much 
trouble to user, this part has been hiden\n# Because it needs java environment installed.\n#\n } else if (fileType == 'excel') {\n \n message('This part needs java installed in your computer, so it is commentted in\n the original file, check the original R file or https://github.com/Yuanchao-Xu/hyfo/blob/master/R/collectData.R\n for ideas.')\n# fileNames <- list.files(folderName, pattern = '*.xlsx', full.names = TRUE)\n# if (length(fileNames) == 0){\n# fileNames <- list.files(folderName, pattern = '*.xls', full.names = TRUE)\n# }\n# \n# if (length(fileNames) == 0) stop('No excel in the folder.')\n# data <- lapply(fileNames, readExcel, range = range, sheetIndex = sheetIndex)\n# checkBind(data, 'rbind')\n# data <- do.call('rbind', data)\n }else{\n stop('fileType should be \"txt\", \"csv\" or \"excel\".')\n }\n \n \n return(data)\n \n}\n\n# #importFrom xlsx read.xls\n# readExcel <- function(fileName, range, sheetIndex){\n# data <- read.xls(fileName, sheetIndex = sheetIndex, rowIndex = seq(range[1], range[2]),\n# colIndex = seq(range[3], range[4])) \n# colnames(data) <- seq(1, dim(data)[2])\n# \n# message(fileName) \n# return(data)\n# }\n\nreadTxt <- function(fileName, range){\n data <- readLines(fileName)\n data <- data[range[1]:range[2]]\n return(data)\n}\n\n\n\n#' @importFrom utils read.csv\n#' @references \n#' R Core Team (2015). R: A language and environment for statistical computing. R Foundation for\n#' Statistical Computing, Vienna, Austria. 
URL https://www.R-project.org/.\nreadCsv <- function(fileName, range){\n \n data <- read.csv(fileName, skip = range[1] - 1, header = FALSE)\n data <- data[1:(range[2] - range[1] + 1), range[3]:range[4]]\n \n return(data)\n}\n\n\n\n\n\n", + "created" : 1487522346501.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "279517550", + "id" : "A879E0CC", + "lastKnownWriteTime" : 1488015924, + "last_content_update" : 1488015924823, + "path" : "~/GitHub/hyfo/R/collectData.R", + "project_path" : "R/collectData.R", + "properties" : { + }, + "relative_order" : 14, + "source_on_save" : false, + "source_window" : "", + "type" : "r_source" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/A87A7AF6 b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/A87A7AF6 new file mode 100644 index 0000000..162c2d7 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/A87A7AF6 @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "#' Get a catchment object from selected shape file.\n#' @param filePath A string representing the path of the shape file.\n#' @return A catchment object can be used in \\code{getSpatialMap()}.\n#' @export\n#' @details This function is based on the package \\code{rgdal} and \\code{sp}, and the output comes from the package \n#' \\code{sp}\n#' @examples\n#' #open internal file\n#' file <- system.file(\"extdata\", \"testCat.shp\", package = \"hyfo\")\n#' catchment <- shp2cat(file)\n#' \n#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n#' \n#' @import rgdal\n#' @importFrom utils tail\n#' @references \n#' \n#' \\itemize{\n#' \\item Roger Bivand, Tim Keitt and Barry Rowlingson (2015). rgdal: Bindings for the Geospatial Data\n#' Abstraction Library. R package version 1.0-4. https://CRAN.R-project.org/package=rgdal\n#' \n#' \\item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for\n#' Statistical Computing, Vienna, Austria. 
URL https://www.R-project.org/.\n#' }\n#' \n#' \nshp2cat <- function(filePath) {\n #if the path <- file.choose(), the seperator is '\\\\'\n if (grepl('\\\\\\\\', filePath)) {\n catName <- tail(strsplit(filePath,'\\\\\\\\')[[1]], 1)#needs to be four \\, caused by some window system problem\n catName1 <- strsplit(catName, '\\\\.')[[1]][1]\n catName2 <- paste('\\\\\\\\', catName, sep = '')\n folderName <- strsplit(filePath, catName2)[[1]]\n n <- list.files(folderName, pattern = catName1)\n if (length(n) == 1) stop('Please place the shp file in the folder containing \n full related files, not only the shape file')\n #the other seperator is '/' \n } else if (grepl('/', filePath)) {\n catName <- tail(strsplit(filePath,'/')[[1]], 1)#needs to be four \\, caused by some window system problem\n catName1 <- strsplit(catName, '\\\\.')[[1]][1]\n catName2 <- paste('/', catName, sep = '')\n folderName <- strsplit(filePath, catName2)[[1]]\n n <- list.files(folderName, pattern = catName1)\n if (length(n) == 1) stop('Please place the shp file in the folder containing \n full related files, not only the shape file')\n }\n \n if (length(folderName) == 0) stop('No shape file found, make sure the shp file is selected.')\n catchment <- readOGR(folderName, catName1)\n return(catchment)\n}\n", + "created" : 1488018106706.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "2883532208", + "id" : "A87A7AF6", + "lastKnownWriteTime" : 1488018115, + "last_content_update" : 1488018115762, + "path" : "~/GitHub/hyfo/R/shp2cat.R", + "project_path" : "R/shp2cat.R", + "properties" : { + }, + "relative_order" : 53, + "source_on_save" : false, + "source_window" : "", + "type" : "r_source" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/AE5809FB b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/AE5809FB new file mode 100644 index 0000000..cede58a --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/AE5809FB @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + 
"contents" : "% Generated by roxygen2: do not edit by hand\n% Please edit documentation in R/getEnsemble.R\n\\name{getHisEnsem}\n\\alias{getHisEnsem}\n\\title{Get ensemble forecast from historical data.}\n\\usage{\ngetHisEnsem(TS, example, interval = 365, buffer = 0, plot = \"norm\",\n output = \"data\", name = NULL, mv = 0, ...)\n}\n\\arguments{\n\\item{TS}{A time series dataframe, with first column Date, and second column value.}\n\n\\item{example}{A vector containing two strings showing the start and end date, which represent the \nforecasting period. Check details for more information.\n\nthe program will extract every possible period in TS you provided to generate the ensemble. Check details for \nmore information.}\n\n\\item{interval}{A number representing the interval of each ensemble member. NOTE: \"interval\" takes\n365 as a year, and 30 as a month, regardless of leap year and months with 31 days. So if you want the interval \nto be 2 years, set \\code{interval = 730}, which equals 2 * 365 ; if two months, set \\code{interval = 60}; \n2 days, \\code{interval = 2}, for other numbers that cannot be divided by 365 or 30 without remainder, it will treat the \nnumber as days.By defualt interval is set to be 365, a year.}\n\n\\item{buffer}{A number showing how many days are used as buffer period for models. Check details for more\ninformation.}\n\n\\item{plot}{A string showing whether the plot will be shown, e.g., 'norm' means normal plot (without any process), \n'cum' means cummulative plot, default is 'norm'. For other words there will be no plot.}\n\n\\item{output}{A string showing which type of output you want. Default is \"data\", if \"ggplot\", the \ndata that can be directly plotted by ggplot2 will be returned, which is easier for you to make series\nplots afterwards. 
NOTE: If \\code{output = 'ggplot'}, the missing value in the data will\nbe replaced by \\code{mv}, if assigned, default mv is 0.}\n\n\\item{name}{If \\code{output = 'ggplot'}, name has to be assigned to your output, in order to differentiate\ndifferent outputs in the later multiplot using \\code{getEnsem_comb}.}\n\n\\item{mv}{A number showing representing the missing value. When calculating the cumulative value, \nmissing value will be replaced by mv, default is 0.}\n\n\\item{...}{\\code{title, x, y} showing the title and x and y axis of the plot. e.g. \\code{title = 'aaa'}}\n}\n\\value{\nA ensemble time series using historical data as forecast.\n}\n\\description{\ngetHisEnsem use historical data as the forecasting input time series.\n}\n\\details{\n\\code{example} E.g., if you have a time series from 2000 to 2010. Assuming you are in 2003,\nyou want to forecast the period from 2003-2-1 to 2003-4-1. Then for each year in your input\ntime series, every year from 1st Feb to 1st Apr will be extracted to generate the ensemble\nforecasts. In this case your input example should be \\code{example = c('2003-2-1', '2003-4-1')}\n\n\\code{interval} doesn't care about leap year and the months with 31 days, it will take 365 as a year, and 30 as a month.\ne.g., if the interval is from 1999-2-1 to 1999-3-1, you should just set interval to 30, although the real interval is 28\ndays.\n\n\\code{example} and \\code{interval} controls how the ensemble will be generated. e.g. if the time series is from \n1990-1-1 to 2001-1-1.\n\nif \\code{example = c('1992-3-1', '1994-1-1')} and \\code{interval = 1095}, note, 1095 = 365 * 3, so the program treat\nthis as 3 years.\n\nThen you are supposed to get the ensemble consisting of following part:\n\n1. 1992-3-1 to 1994-1-1 first one is the example, and it's NOT start from 1990-3-1.\n2. 1995-3-1 to 1997-1-1 second one starts from 1993, because \"interval\" is 3 years.\n3. 
1998-3-1 to 2000-1-1\n\nbecause the last one \"2000-3-1 to 2002-1-1\", 2002 exceeds the original TS range, so it will not be included.\n\nSometimes, there are leap years and months with 31 days included in some ensemble part, in which case the length of the data will\nbe different, e.g., 1999-1-1 to 1999-3-1 is 1 day less than 2000-1-1 to 2000-3-1. In this situation,\nthe data will use example as a standard. If the example is 1999-1-1 to 1999-3-1, then the latter one\nwill be changed to 2001-1-1 to 2000-2-29, which keeps the start Date and change the end Date.\n\nIf the end date is so important that cannot be changed, try to solve this problem by resetting\nthe example period, to make the event included in the example.\n\nGood set of example and interval can generate good ensemble.\n\n\\code{buffer}\nSometimes the model needs to run for a few days to warm up, before the forecast. E.g., if a forecast starts at\n'1990-1-20', for some model like MIKE NAM model, the run needs to be started about 14 days. So the input timeseries\nshould start from '1990-1-6'.\n\nBuffer is mainly used for the model hotstart. Sometimes the hot start file cannot contain all the parameters needed,\nonly some important parameters. In this case, the model needs to run for some time, to make other parameters ready\nfor the simulation.\n\n\n\\code{name}\nAssuming you have two ggplot outputs, you want to plot them together. In this situation, you\nneed a name column to differentiate one ggplot output from the other. 
You can assigne this name\nby the argument directly, name has to be assigned if \\code{output = 'ggplot'} is selected,\n}\n\\examples{\n\ndata(testdl)\n\na <- testdl[[1]]\n\n# Choose example from \"1994-2-4\" to \"1996-1-4\"\nb <- getHisEnsem(a, example = c('1994-2-4', '1996-1-4'))\n\n# Default interval is one year, can be set to other values, check help for information.\n\n# Take 7 months as interval\nb <- getHisEnsem(a, example = c('1994-2-4', '1996-1-4'), interval = 210, plot = 'cum') \n# Take 30 days as buffer\nb <- getHisEnsem(a, example = c('1994-2-4', '1996-1-4'), interval = 210, buffer = 30)\n\n\n# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n\n\n}\n\\references{\n\\itemize{\n\\item Hadley Wickham (2007). Reshaping Data with the reshape Package. Journal of Statistical Software,\n21(12), 1-20. URL http://www.jstatsoft.org/v21/i12/.\n\\item H. Wickham. ggplot2: elegant graphics for data analysis. Springer New York, 2009.\n}\n}\n\n", + "created" : 1487956279845.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "439558619", + "id" : "AE5809FB", + "lastKnownWriteTime" : 1488018205, + "last_content_update" : 1488018205, + "path" : "~/GitHub/hyfo/man/getHisEnsem.Rd", + "project_path" : "man/getHisEnsem.Rd", + "properties" : { + }, + "relative_order" : 33, + "source_on_save" : false, + "source_window" : "", + "type" : "r_doc" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/AE7DF6FE b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/AE7DF6FE new file mode 100644 index 0000000..acf39e9 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/AE7DF6FE @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "% Generated by roxygen2: do not edit by hand\n% Please edit documentation in R/case_anarbe.R\n\\name{collectData_excel_anarbe}\n\\alias{collectData_excel_anarbe}\n\\title{Collect data from different excel files}\n\\usage{\ncollectData_excel_anarbe(folderName, keyword = NULL, 
output = TRUE)\n}\n\\arguments{\n\\item{folderName}{A string showing the folder path.}\n\n\\item{keyword}{A string showing the extracted column, e.g., waterLevel, waterBalance.}\n\n\\item{output}{A boolean showing whether the output is given.}\n}\n\\value{\nThe collected data from different excel files.\n}\n\\description{\nCollect data from different excel files\n}\n\\references{\n\\itemize{\n\\item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for\nStatistical Computing, Vienna, Austria. URL http://www.R-project.org/.\n}\n}\n\n", + "created" : 1487956055852.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "2294040637", + "id" : "AE7DF6FE", + "lastKnownWriteTime" : 1488018205, + "last_content_update" : 1488018205, + "path" : "~/GitHub/hyfo/man/collectData_excel_anarbe.Rd", + "project_path" : "man/collectData_excel_anarbe.Rd", + "properties" : { + }, + "relative_order" : 24, + "source_on_save" : false, + "source_window" : "", + "type" : "r_doc" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/B4F74B5C b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/B4F74B5C new file mode 100644 index 0000000..db1c515 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/B4F74B5C @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "# Generated by roxygen2: do not edit by 
hand\n\nexport(applyBiasFactor)\nexport(biasCorrect)\nexport(checkBind)\nexport(collectData)\nexport(collectData_csv_anarbe)\nexport(collectData_excel_anarbe)\nexport(collectData_txt_anarbe)\nexport(downscaleNcdf)\nexport(extractPeriod)\nexport(fillGap)\nexport(getAnnual)\nexport(getBiasFactor)\nexport(getEnsem_comb)\nexport(getFrcEnsem)\nexport(getHisEnsem)\nexport(getLMom)\nexport(getMoment)\nexport(getNcdfVar)\nexport(getPreciBar)\nexport(getPreciBar_comb)\nexport(getSpatialMap)\nexport(getSpatialMap_comb)\nexport(getSpatialMap_mat)\nexport(list2Dataframe)\nexport(loadNcdf)\nexport(plotTS)\nexport(plotTS_comb)\nexport(resample)\nexport(shp2cat)\nexport(writeNcdf)\nexportClasses(biasFactor)\nimport(ggplot2)\nimport(maps)\nimport(maptools)\nimport(ncdf4)\nimport(plyr)\nimport(rgdal)\nimport(rgeos)\nimportFrom(MASS,fitdistr)\nimportFrom(data.table,rbindlist)\nimportFrom(grDevices,rainbow)\nimportFrom(lmom,samlmu)\nimportFrom(methods,new)\nimportFrom(methods,setClass)\nimportFrom(methods,setGeneric)\nimportFrom(methods,setMethod)\nimportFrom(moments,kurtosis)\nimportFrom(moments,skewness)\nimportFrom(reshape2,melt)\nimportFrom(stats,aggregate)\nimportFrom(stats,coef)\nimportFrom(stats,cor)\nimportFrom(stats,ecdf)\nimportFrom(stats,lm)\nimportFrom(stats,median)\nimportFrom(stats,na.omit)\nimportFrom(stats,pgamma)\nimportFrom(stats,qgamma)\nimportFrom(stats,quantile)\nimportFrom(stats,rgamma)\nimportFrom(stats,var)\nimportFrom(utils,combn)\nimportFrom(utils,packageDescription)\nimportFrom(utils,read.csv)\nimportFrom(utils,read.fwf)\nimportFrom(utils,tail)\nimportFrom(zoo,as.Date)\n", + "created" : 1487521274814.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "3446792241", + "id" : "B4F74B5C", + "lastKnownWriteTime" : 1488018205, + "last_content_update" : 1488018205, + "path" : "~/GitHub/hyfo/NAMESPACE", + "project_path" : "NAMESPACE", + "properties" : { + }, + "relative_order" : 12, + "source_on_save" : false, + "source_window" : "", + 
"type" : "r_namespace" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/B8E278FD b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/B8E278FD new file mode 100644 index 0000000..4d0735e --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/B8E278FD @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "% Generated by roxygen2: do not edit by hand\n% Please edit documentation in R/dataDocument.R\n\\docType{data}\n\\name{testdl}\n\\alias{testdl}\n\\title{testdl}\n\\format{A list consists of 3 different lists.\n\\describe{\n \\item{AAA}{AAA, a dataframe containing a date column and a value column. }\n \\item{BBB}{BBB, a dataframe containing a date column and a value column.}\n \\item{CCC}{CCC, a dataframe containing a date column and a value column.}\n ...\n}}\n\\source{\nhttp://meteo.navarra.es/estaciones/mapadeestaciones.cfm\nhttp://www4.gipuzkoa.net/oohh/web/esp/02.asp\n}\n\\usage{\ntestdl\n}\n\\description{\nA list containing different precipitation time series.\n}\n\\references{\n\\itemize{\n\\item http://meteo.navarra.es/estaciones/mapadeestaciones.cfm\n\\item #' http://www4.gipuzkoa.net/oohh/web/esp/02.asp\n}\n}\n\\keyword{datasets}\n\n", + "created" : 1488014925949.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "3744290800", + "id" : "B8E278FD", + "lastKnownWriteTime" : 1488018205, + "last_content_update" : 1488018205, + "path" : "~/GitHub/hyfo/man/testdl.Rd", + "project_path" : "man/testdl.Rd", + "properties" : { + }, + "relative_order" : 48, + "source_on_save" : false, + "source_window" : "", + "type" : "r_doc" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/BFF6AE7A b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/BFF6AE7A new file mode 100644 index 0000000..e312b36 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/BFF6AE7A @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "#' Get annual rainfall of different rainfall time series\n#' \n#' Get annual rainfall of different 
raninfall time series.\n#' \n#' \n#' @param data A list containing different time series of different rainfall gauges. Or a dataframe with first column Date and the rest columns the value of different\n#' gauging stations. Usually an output of \\code{list2Dataframe}.\n#' @param output A string showing the output output.\n#' @param minRecords A number showing the minimum accept record number, e.g. for a normal \n#' year(365 days), if \\code{minRecords = 360}, it means if a year has less than 360 records\n#' of a year, it will be ignored in the mean annual value calculation. Only valid \n#' when \\code{output = \"mean\"}, default is 355.\n#' @param ... \\code{title, x, y} showing the title and x and y axis of the plot. e.g. \\code{title = 'aaa'}\n#' @return The annual rainfall and the number of missing data of each year and each rainfall gauge, which \n#' will also be plotted. If output \"mean\" is seleted, the mean annual rainfall will be returned.\n#' @details \n#' It is a generic function, if in your case you need to debug, please see \\code{?debug()} \n#' for how to debug S4 method.\n#' \n#' @examples\n#' #datalist is provided by the package as a test.\n#' data(testdl)\n#' a <- getAnnual(testdl)\n#' #set minRecords to control the calculation of annual rainfall.\n#' b <- getAnnual(testdl, output = 'mean', minRecords = 350)\n#' c <- getAnnual(testdl, output = 'mean', minRecords = 365)\n#' \n#' a1 <- extractPeriod(testdl, comm = TRUE)\n#' a2 <- list2Dataframe(a1)\n#' getAnnual(a2)\n#' \n#' a3 <- fillGap(a2)\n#' getAnnual(a3)\n#' \n#' \n#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n#' \n#' @export\n#' @importFrom methods setGeneric\n#' \n#' @references \n#' \\itemize{\n#' \\item H. Wickham. ggplot2: elegant graphics for data analysis. Springer New York, 2009.\n#' \\item Hadley Wickham (2007). Reshaping Data with the reshape Package. Journal of Statistical Software,\n#' 21(12), 1-20. 
URL http://www.jstatsoft.org/v21/i12/.\n#' \\item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for\n#' Statistical Computing, Vienna, Austria. URL https://www.R-project.org/.\n#' }\n#' \n#' \nsetGeneric('getAnnual', function(data, output = 'series', minRecords = 355, \n ...) {\n standardGeneric('getAnnual')\n})\n\n#' @rdname getAnnual\n#' @importFrom methods setMethod\nsetMethod('getAnnual', signature('data.frame'), \n function(data, output, minRecords, ...) {\n result <- getAnnual.TS(data)\n getAnnual.plot(result, output, minRecords, ...)\n return(result)\n})\n\n#' @rdname getAnnual\n#' @importFrom methods setMethod\nsetMethod('getAnnual', signature('list'),\n function(data, output, minRecords, ...) {\n result <- getAnnual.list(data)\n getAnnual.plot(result, output, minRecords, ...)\n return(result)\n })\n\n#' @importFrom data.table rbindlist\ngetAnnual.TS <- function(dataframe) {\n Date <- as.POSIXlt(dataframe[, 1])\n # Calculate how many gauging stations.\n stations <- colnames(dataframe)[2:ncol(dataframe)]\n \n data <- lapply(stations, function(x) {\n dataframe_new <- data.frame(Date, dataframe[, x])\n colnames(dataframe_new)[2] <- x\n getAnnual_dataframe(dataframe_new)\n })\n \n data <- rbindlist(data)\n # After rbind, factor level has to be reassigned in order to be well plotted.\n data$Year <- factor(data$Year, levels = sort(unique(data$Year)), ordered = TRUE)\n rownames(data) <- NULL\n \n return(data)\n}\n\n\n#' @importFrom data.table rbindlist\ngetAnnual.list <- function(datalist) {\n data <- lapply(datalist, FUN = getAnnual_dataframe)\n data <- rbindlist(data)\n # After rbind, factor level has to be reassigned in order to be well plotted.\n data$Year <- factor(data$Year, levels = sort(unique(data$Year)), ordered = TRUE)\n rownames(data) <- NULL\n return(data)\n}\n\n#' @import ggplot2 \n#' @importFrom reshape2 melt\n#' @importFrom stats aggregate\ngetAnnual.plot <- function(data, output, minRecords, ...) 
{\n theme_set(theme_bw())\n \n if (output == 'mean') {\n validData <- data[data$recordNum >= minRecords,]\n \n data <- aggregate(validData$AnnualPreci, list(validData$Name), mean)\n colnames(data) <- c('Name', 'AnnualPreci')\n \n mainLayer <- with(data, {\n ggplot(data)+\n geom_bar(aes(x = Name, y = AnnualPreci, fill = Name), stat = 'identity')+\n labs(empty = NULL, ...)#in order to pass \"...\", arguments shouldn't be empty.\n \n })\n \n print(mainLayer)\n \n } else {\n \n plotData <- with(data, {\n subset(data, select = c(Year, Name, NANum, AnnualPreci))\n })\n \n plotData <- melt(plotData, var.id = c('Year', 'Name'))\n \n \n mainLayer <- with(plotData, {\n ggplot(plotData) +\n geom_bar(aes(x = Year, y = value , fill = Name), \n stat = 'identity') +\n facet_grid(variable ~ Name, scale = 'free') +\n xlab('Year') +\n ylab(NULL) +\n labs(empty = NULL, ...) +#in order to pass \"...\", arguments shouldn't be empty.\n theme(plot.title = element_text(size = 20, face = 'bold', vjust = 1)) +\n theme(axis.text.x = element_text(angle = 90, hjust = 1, size = rel(1.5)),\n axis.text.y = element_text(size = rel(1.5)))\n # grid.arrange(mainLayer, ncol = 4)\n \n })\n \n \n print(mainLayer)\n } \n}\n\n\n\n\n\n\n\n\n#' Get annual rainfall of the input time series.\n#' \n#' @param dataset A dataframe containing one time series, e.g., rainfall from one gauging station.\n#' the time should follow the format : \"1990-1-1\"\n#' @return The annual rainfall of each year of the input station.\n# @examples\n# data(testdl)\n# getAnnual_dataframe(testdl[[1]])\n#' \ngetAnnual_dataframe <- function(dataset) {\n \n if (!grepl('-|/', dataset[1, 1])) {\n stop ('First column is not date or Wrong Date formate, check the format in ?as.Date{base},\n and use as.Date to convert.')\n }\n Date <- as.Date(dataset[, 1])\n year <- format(Date, '%Y')\n yearUnique <- unique(year)\n # yearUnique <- factor(yearUnique, levels = yearUnique, ordered = TRUE)\n calcuNum <- c(1:length(yearUnique))\n \n \n annualPreci 
<- tapply(dataset[, 2], INDEX = year, FUN = sum, na.rm = TRUE)\n recordNum <- tapply(dataset[, 2], INDEX = year, function(x) length(which(!is.na(x))))\n NANum <- tapply(dataset[, 2], INDEX = year, function(x) length(which(is.na(x))))\n \n \n name <- rep(colnames(dataset)[2], length(calcuNum))\n output <- data.frame(Year = as.numeric(yearUnique), Name = name, AnnualPreci = annualPreci,\n recordNum, NANum)\n \n #output$Year <- factor(output$Year, levels = output$Year, ordered = TRUE)\n return(output)\n}\n\n", + "created" : 1483876618596.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "1161286313", + "id" : "BFF6AE7A", + "lastKnownWriteTime" : 1488015924, + "last_content_update" : 1488015924693, + "path" : "~/GitHub/hyfo/R/getAnnual(generic).R", + "project_path" : "R/getAnnual(generic).R", + "properties" : { + }, + "relative_order" : 4, + "source_on_save" : false, + "source_window" : "", + "type" : "r_source" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/CA11BD0A b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/CA11BD0A new file mode 100644 index 0000000..eef58bb --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/CA11BD0A @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "% Generated by roxygen2: do not edit by hand\n% Please edit documentation in R/dataDocument.R\n\\docType{data}\n\\name{tgridData}\n\\alias{tgridData}\n\\title{tgridData}\n\\format{A list containing different information.\n\\describe{\n \\item{Variables}{variable information. }\n \\item{Data}{Data.}\n \\item{xyCoords}{longitude and latitude of the data.}\n \\item{Dates}{Date information.}\n ...\n}}\n\\source{\nhttp://www.meteo.unican.es/datasets/spain02\n}\n\\usage{\ntgridData\n}\n\\description{\nA list containing different information getting from grid data file, e.g., netcdf file.\n}\n\\references{\n\\itemize{\n\\item Herrera, S., Ancell, R., Gutierrez, J. M., Pons, M. R., Frias, M. D., & Fernandez, J. \n(2012). 
Development and analysis of a 50-year high-resolution daily gridded precipitation dataset \nover Spain (Spain02). International Journal of Climatology \n(http://www.meteo.unican.es/datasets/spain02), 10.1002/joc.2256.\n}\n}\n\\keyword{datasets}\n\n", + "created" : 1484118438453.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "3096661772", + "id" : "CA11BD0A", + "lastKnownWriteTime" : 1488018205, + "last_content_update" : 1488018205, + "path" : "~/GitHub/hyfo/man/tgridData.Rd", + "project_path" : "man/tgridData.Rd", + "properties" : { + }, + "relative_order" : 10, + "source_on_save" : false, + "source_window" : "", + "type" : "r_doc" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/CA271C51 b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/CA271C51 new file mode 100644 index 0000000..7e34f3b --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/CA271C51 @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "% Generated by roxygen2: do not edit by hand\n% Please edit documentation in R/getAnnual(generic).R\n\\docType{methods}\n\\name{getAnnual}\n\\alias{getAnnual}\n\\alias{getAnnual,data.frame-method}\n\\alias{getAnnual,list-method}\n\\title{Get annual rainfall of different rainfall time series}\n\\usage{\ngetAnnual(data, output = \"series\", minRecords = 355, ...)\n\n\\S4method{getAnnual}{data.frame}(data, output = \"series\", minRecords = 355,\n ...)\n\n\\S4method{getAnnual}{list}(data, output = \"series\", minRecords = 355, ...)\n}\n\\arguments{\n\\item{data}{A list containing different time series of different rainfall gauges. Or a dataframe with first column Date and the rest columns the value of different\ngauging stations. Usually an output of \\code{list2Dataframe}.}\n\n\\item{output}{A string showing the output output.}\n\n\\item{minRecords}{A number showing the minimum accept record number, e.g. 
for a normal \nyear(365 days), if \\code{minRecords = 360}, it means if a year has less than 360 records\nof a year, it will be ignored in the mean annual value calculation. Only valid \nwhen \\code{output = \"mean\"}, default is 355.}\n\n\\item{...}{\\code{title, x, y} showing the title and x and y axis of the plot. e.g. \\code{title = 'aaa'}}\n}\n\\value{\nThe annual rainfall and the number of missing data of each year and each rainfall gauge, which \nwill also be plotted. If output \"mean\" is seleted, the mean annual rainfall will be returned.\n}\n\\description{\nGet annual rainfall of different raninfall time series.\n}\n\\details{\nIt is a generic function, if in your case you need to debug, please see \\code{?debug()} \nfor how to debug S4 method.\n}\n\\examples{\n#datalist is provided by the package as a test.\ndata(testdl)\na <- getAnnual(testdl)\n#set minRecords to control the calculation of annual rainfall.\nb <- getAnnual(testdl, output = 'mean', minRecords = 350)\nc <- getAnnual(testdl, output = 'mean', minRecords = 365)\n\na1 <- extractPeriod(testdl, comm = TRUE)\na2 <- list2Dataframe(a1)\ngetAnnual(a2)\n\na3 <- fillGap(a2)\ngetAnnual(a3)\n\n\n# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n\n}\n\\references{\n\\itemize{\n\\item H. Wickham. ggplot2: elegant graphics for data analysis. Springer New York, 2009.\n\\item Hadley Wickham (2007). Reshaping Data with the reshape Package. Journal of Statistical Software,\n21(12), 1-20. URL http://www.jstatsoft.org/v21/i12/.\n\\item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for\nStatistical Computing, Vienna, Austria. 
URL https://www.R-project.org/.\n}\n}\n\n", + "created" : 1487956221717.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "3081706177", + "id" : "CA271C51", + "lastKnownWriteTime" : 1488018205, + "last_content_update" : 1488018205, + "path" : "~/GitHub/hyfo/man/getAnnual.Rd", + "project_path" : "man/getAnnual.Rd", + "properties" : { + }, + "relative_order" : 29, + "source_on_save" : false, + "source_window" : "", + "type" : "r_doc" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/CE991F6 b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/CE991F6 new file mode 100644 index 0000000..6afb391 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/CE991F6 @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "% Generated by roxygen2: do not edit by hand\n% Please edit documentation in R/analyzeTS.R\n\\name{getMoment}\n\\alias{getMoment}\n\\title{get moment analysis of the input distribution}\n\\usage{\ngetMoment(dis)\n}\n\\arguments{\n\\item{dis}{A distribution, for hydrology usually a time series with only data column without time.}\n}\n\\value{\nThe mean, variation, skewness and kurtosis of the input distribution\n}\n\\description{\nget moment analysis of the input distribution\n}\n\\examples{\ndis <- seq(1, 100)\ngetMoment(dis)\n\n# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n\n}\n\\references{\n\\itemize{\n\\item Lukasz Komsta and Frederick Novomestky (2015). moments: Moments, cumulants, skewness, kurtosis and\nrelated tests. R package version 0.14. https://CRAN.R-project.org/package=moments\n\n\\item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for\nStatistical Computing, Vienna, Austria. 
URL https://www.R-project.org/.\n}\n}\n\n", + "created" : 1487956303573.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "4150439112", + "id" : "CE991F6", + "lastKnownWriteTime" : 1488018205, + "last_content_update" : 1488018205, + "path" : "~/GitHub/hyfo/man/getMoment.Rd", + "project_path" : "man/getMoment.Rd", + "properties" : { + }, + "relative_order" : 35, + "source_on_save" : false, + "source_window" : "", + "type" : "r_doc" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/D0BF85EC b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/D0BF85EC new file mode 100644 index 0000000..5f3f316 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/D0BF85EC @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "% Generated by roxygen2: do not edit by hand\n% Please edit documentation in R/getEnsemble.R\n\\name{getEnsem_comb}\n\\alias{getEnsem_comb}\n\\title{Combine ensembles together}\n\\usage{\ngetEnsem_comb(..., list = NULL, nrow = 1, legend = TRUE, x = \"\",\n y = \"\", title = \"\", output = FALSE)\n}\n\\arguments{\n\\item{...}{different ensembles generated by \\code{getHisEnsem(, output = 'ggplot')} \nor \\code{getFrcEnsem(, output = 'ggplot')}, see details.}\n\n\\item{list}{If input is a list containing different ggplot data, use \\code{list = inputlist}.}\n\n\\item{nrow}{A number showing the number of rows.}\n\n\\item{legend}{A boolean representing whether you want the legend. 
Sometimes when you combine\nplots, there will be a lot of legends, if you don't like it, you can turn it off by setting\n\\code{legend = FALSE}, default is TRUE.}\n\n\\item{x}{A string of x axis name.}\n\n\\item{y}{A string of y axis name.}\n\n\\item{title}{A string of the title.}\n\n\\item{output}{A boolean, if chosen TRUE, the output will be given.}\n}\n\\value{\nA combined ensemble plot.\n}\n\\description{\nCombine ensembles together\n}\n\\examples{\n\ndata(testdl)\n\na <- testdl[[1]]\n\n# Choose example from \"1994-2-4\" to \"1996-1-4\"\n\n\nb1<- getHisEnsem(a, example = c('1995-2-4', '1996-1-4'), plot = 'cum', output = 'ggplot',\n name = 1)\n \nb2 <- getHisEnsem(a, example = c('1995-4-4', '1996-3-4'), plot = 'cum', output = 'ggplot',\n name = 2)\n\ngetEnsem_comb(b1, b2)\ngetEnsem_comb(list = list(b1, b2), nrow = 2)\n\n# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n\n\n}\n\\references{\n\\itemize{\n\\item H. Wickham. ggplot2: elegant graphics for data analysis. Springer New York, 2009.\n\\item Santander Meteorology Group (2015). downscaleR: Climate data manipulation and\nstatistical downscaling. 
R package version 0.6-0.\nhttps://github.com/SantanderMetGroup/downscaleR/wiki\n}\n}\n\n", + "created" : 1487956253621.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "814409775", + "id" : "D0BF85EC", + "lastKnownWriteTime" : 1488018205, + "last_content_update" : 1488018205, + "path" : "~/GitHub/hyfo/man/getEnsem_comb.Rd", + "project_path" : "man/getEnsem_comb.Rd", + "properties" : { + }, + "relative_order" : 31, + "source_on_save" : false, + "source_window" : "", + "type" : "r_doc" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/D1FE15E0 b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/D1FE15E0 new file mode 100644 index 0000000..7318996 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/D1FE15E0 @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "% Generated by roxygen2: do not edit by hand\n% Please edit documentation in R/getPreciBar(generic).R\n\\docType{methods}\n\\name{getPreciBar}\n\\alias{getPreciBar}\n\\alias{getPreciBar,data.frame-method}\n\\alias{getPreciBar,list-method}\n\\title{get mean rainfall bar plot of the input dataset or time series.}\n\\usage{\ngetPreciBar(data, method, cell = \"mean\", output = \"data\", name = NULL,\n plotRange = TRUE, member = NULL, omitNA = TRUE, info = FALSE, ...)\n\n\\S4method{getPreciBar}{list}(data, method, cell = \"mean\", output = \"data\",\n name = NULL, plotRange = TRUE, member = NULL, omitNA = TRUE,\n info = FALSE, ...)\n\n\\S4method{getPreciBar}{data.frame}(data, method, cell = \"mean\",\n output = \"data\", name = NULL, plotRange = TRUE, member = NULL,\n omitNA = TRUE, info = FALSE, ...)\n}\n\\arguments{\n\\item{data}{A list containing different information, should be the result of reading netcdf file using\n\\code{\\link{loadNcdf}}, or a time series, with first column the Date, second the value.\nTime series can be an ENSEMBLE containning different members. 
Than the mean value will be given and the range will be given.}\n\n\\item{method}{A string showing the calculating method of the input time series. More information\nplease refer to the details.}\n\n\\item{cell}{A vector containing the locaton of the cell, e.g. c(2, 3), default is \"mean\", representing\nthe spatially averaged value. Check details for more information.}\n\n\\item{output}{A string showing the type of the output, if \\code{output = 'ggplot'}, the returned \ndata can be used in ggplot and \\code{getPreciBar_comb()}; if \\code{output = 'plot'}, the returned data is the plot containing all \nlayers' information, and can be plot directly or used in grid.arrange; if not set, the data\nwill be returned.}\n\n\\item{name}{If \\code{output = 'ggplot'}, name has to be assigned to your output, in order to differentiate\ndifferent outputs in the later multiplot using \\code{getSpatialMap_comb}.}\n\n\\item{plotRange}{A boolean showing whether the range will be plotted.}\n\n\\item{member}{A number showing which member is selected to get, if the dataset has a \"member\" dimension. Default\nis NULL, if no member assigned, and there is a \"member\" in dimensions, the mean value of the members will be\ntaken.}\n\n\\item{omitNA}{A boolean showing whether the missing value is omitted.}\n\n\\item{info}{A boolean showing whether the information of the map, e.g., max, mean ..., default is FALSE.}\n\n\\item{...}{\\code{title, x, y} showing the title and x and y axis of the plot. e.g. \\code{title = 'aaa'}}\n}\n\\value{\nThe calculated mean value of the input time series and the plot of the result.\n}\n\\description{\nget mean rainfall bar plot of the input dataset or time series.\n}\n\\details{\nThere are following methods to be selected, \n\"annual\": annual rainfall of each year is plotted. \n\"winter\", \"spring\", \"autumn\", \"summer\": seasonal rainfall of each year is plotted.\nMonth(number 1 to 12): month rainfall of each year is plotted, e.g. 
march rainfall of each year.\n\"meanMonthly\": the mean monthly rainfall of each month over the whole period.\n\n#Since \"winter\" is a crossing year, 12, 1, 2, 12 is in former year, and 1, 2 are in latter year.\n#so winter belongs to the latter year.\n\n\n\\code{cell} representing the location of the cell, NOTE: this location means the index of the cell,\nIT IS NOT THE LONGITUDE AND LATITUDE. e.g., \\code{cell = c(2, 3)}, the program will take the 2nd longitude\nand 3rd latitude, by the increasing order. Longitude comes first.\n\n\nIt is a generic function, if in your case you need to debug, please see \\code{?debug()} \nfor how to debug S4 method.\n}\n\\examples{\n#gridData provided by package is the result of \\\\code{loadNcdf()}\ndata(tgridData)\nb1 <- getPreciBar(tgridData, method = 'annual')\nb2 <- getPreciBar(tgridData, method = 'meanMonthly')\n\ndata(testdl)\nTS <- testdl[[1]]\na <- getPreciBar(TS, method = 'spring')\n# if info = T, the information will be given at the bottom.\na <- getPreciBar(TS, method = 'spring', info = TRUE)\n\n\n# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n\n}\n\\references{\n\\itemize{\n\\item Hadley Wickham (2007). Reshaping Data with the reshape Package. Journal of Statistical Software,\n21(12), 1-20. URL http://www.jstatsoft.org/v21/i12/.\n\\item H. Wickham. ggplot2: elegant graphics for data analysis. Springer New York, 2009.\n\\item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for\nStatistical Computing, Vienna, Austria. 
URL https://www.R-project.org/.\n}\n}\n\n", + "created" : 1487956352900.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "665375225", + "id" : "D1FE15E0", + "lastKnownWriteTime" : 1488018205, + "last_content_update" : 1488018205, + "path" : "~/GitHub/hyfo/man/getPreciBar.Rd", + "project_path" : "man/getPreciBar.Rd", + "properties" : { + }, + "relative_order" : 37, + "source_on_save" : false, + "source_window" : "", + "type" : "r_doc" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/D22A91DA b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/D22A91DA new file mode 100644 index 0000000..696978f --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/D22A91DA @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "% Generated by roxygen2: do not edit by hand\n% Please edit documentation in R/multi-biasCorrect(generic).R\n\\docType{methods}\n\\name{applyBiasFactor}\n\\alias{applyBiasFactor}\n\\alias{applyBiasFactor,data.frame,biasFactor-method}\n\\alias{applyBiasFactor,list,biasFactor.hyfo-method}\n\\title{Apply bias factor to different forecasts for multi/operational/real time bias correction.}\n\\usage{\napplyBiasFactor(frc, biasFactor, obs = NULL)\n\n\\S4method{applyBiasFactor}{data.frame,biasFactor}(frc, biasFactor, obs = NULL)\n\n\\S4method{applyBiasFactor}{list,biasFactor.hyfo}(frc, biasFactor, obs = NULL)\n}\n\\arguments{\n\\item{frc}{a hyfo grid data output or a dataframe(time series) consists of Date column and one or more value columns, \nrepresenting the frc data. Check details for more information.}\n\n\\item{biasFactor}{a file containing all the information of the calibration, will be\napplied to different forecasts.}\n\n\\item{obs}{for some methods, observation input is necessary. obs is a hyfo grid data output or a dataframe (time series) consists of Date column and one or more value columns, \nrepresenting the observation data. 
Default value is NULL.}\n}\n\\description{\nWhen you do multi/operational/real time bias correction. It's too expensive\nto input hindcast and obs every time. Especially when you have a long period of hindcast\nand obs, but only a short period of frc, it's too unecessary to read and compute hindcast\nand obs everytime. Therefore, biasFactor is designed. Using \\code{getBiasFactor}, you can\nget the biasFactor with hindcast and observation, then you can use \\code{applyBiasFactor} to \napply the biasFactor to different forecasts.\n}\n\\details{\nInformation about the method and how biasCorrect works can be found in \\code{\\link{biasCorrect}}\n\n\\strong{why use biasFactor}\n\nAs for forecasting, for daily data, there is usually no need to have\ndifferent bias factor every different day. You can calculate one bisa factor using a long\nperiod of hindcast and obs, and apply that factor to different frc.\n\nFor example,\n\nYou have 10 years of hindcast and observation. you want to do bias correction for some \nforecasting product, e.g. system 4. For system 4, each month, you will get a new forecast\nabout the future 6 months. So if you want to do the real time bias correction, you have to\ntake the 10 years of hindcast and observation data with you, and run \\code{biasCorrect} every\ntime you get a new forecast. That's too expensive.\n\nFor some practical use in forecasting, there isn't a so high demand for accuracy. E.g.,\nMaybe for February and March, you can use the same biasFactor, no need to do the computation \nagain. 
\n\n\nIt is a generic function, if in your case you need to debug, please see \\code{?debug()} \nfor how to debug S4 method.\n}\n\\examples{\n\n######## hyfo grid file biascorrection\n########\n\n# If your input is obtained by \\\\code{loadNcdf}, you can also directly biascorrect\n# the file.\n\n# First load ncdf file.\nfilePath <- system.file(\"extdata\", \"tnc.nc\", package = \"hyfo\")\nvarname <- getNcdfVar(filePath) \nnc <- loadNcdf(filePath, varname)\n\ndata(tgridData)\n#' # Since the example data, has some NA values, the process will include some warning #message, \n# which can be ignored in this case.\n\n\n\n# Then we will use nc data as forecasting data, and use itself as hindcast data,\n# use tgridData as observation.\n\nbiasFactor <- getBiasFactor(nc, tgridData)\nnewFrc <- applyBiasFactor(nc, biasFactor)\n \nbiasFactor <- getBiasFactor(nc, tgridData, method = 'eqm', extrapolate = 'constant',\npreci = TRUE)\n# This method needs obs input.\nnewFrc <- applyBiasFactor(nc, biasFactor, obs = tgridData)\n\nbiasFactor <- getBiasFactor(nc, tgridData, method = 'gqm', preci = TRUE)\nnewFrc <- applyBiasFactor(nc, biasFactor) \n\n\n######## Time series biascorrection\n########\n\n# Use the time series from testdl as an example, we take frc, hindcast and obs from testdl.\ndata(testdl)\n\n# common period has to be extracted in order to better train the forecast.\n\ndatalist <- extractPeriod(testdl, startDate = '1994-1-1', endDate = '1995-10-1')\n\nfrc <- datalist[[1]]\nhindcast <- datalist[[2]]\nobs <- datalist[[3]]\n\n\n# The data used here is just for example, so there could be negative data.\n\n# default method is scaling\nbiasFactor <- getBiasFactor(hindcast, obs)\nfrc_new <- applyBiasFactor(frc, biasFactor)\n\n# for precipitation data, extra process needs to be executed, so you have to tell\n# the program to it is a precipitation data.\n\nbiasFactor <- getBiasFactor(hindcast, obs, preci = TRUE)\nfrc_new1 <- applyBiasFactor(frc, biasFactor)\n\n# You can use other 
methods to biascorrect, e.g. delta method. \nbiasFactor <- getBiasFactor(hindcast, obs, method = 'delta')\n# delta method needs obs input.\nfrc_new2 <- applyBiasFactor(frc, biasFactor, obs = obs)\n\n# \nbiasFactor <- getBiasFactor(hindcast, obs, method = 'eqm', preci = TRUE)\n# eqm needs obs input\nfrc_new3 <- applyBiasFactor(frc, biasFactor, obs = obs)\n\nbiasFactor <- getBiasFactor(hindcast, obs, method = 'gqm', preci = TRUE)\nfrc_new4 <- applyBiasFactor(frc, biasFactor)\n\nplotTS(obs, frc, frc_new, frc_new1, frc_new2, frc_new3, frc_new4, plot = 'cum')\n\n# You can also give name to this input list.\nTSlist <- list(obs, frc, frc_new, frc_new1, frc_new2, frc_new3, frc_new4)\nnames(TSlist) <- c('obs', 'frc', 'delta', 'delta_preci', 'scale', 'eqm', 'gqm')\nplotTS(list = TSlist, plot = 'cum')\n\n\n# If the forecasts you extracted only has incontinuous data for certain months and years, e.g.,\n# for seasonal forecasting, forecasts only provide 3-6 months data, so the case can be \n# for example Dec, Jan and Feb of every year from year 1999-2005.\n# In such case, you need to extract certain months and years from observed time series.\n# extractPeriod() can be then used.\n \n \n\n\n\n# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n\n\n}\n\\author{\nYuanchao Xu \\email{xuyuanchao37@gmail.com }\n}\n\\references{\nBias correction methods come from \\code{biasCorrection} from \\code{dowscaleR}\n\n\\itemize{\n\n\\item Santander Meteorology Group (2015). downscaleR: Climate data manipulation and statistical downscaling. R\npackage version 0.6-0. https://github.com/SantanderMetGroup/downscaleR/wiki\n\n\\item R.A.I. Wilcke, T. Mendlik and A. Gobiet (2013) Multi-variable error correction of regional climate models. Climatic Change, 120, 871-887\n\n\\item A. Amengual, V. Homar, R. Romero, S. Alonso, and C. Ramis (2012) A Statistical Adjustment of Regional Climate Model Outputs to Local Scales: Application to Platja de Palma, Spain. J. 
Clim., 25, 939-957\n\n\\item C. Piani, J. O. Haerter and E. Coppola (2009) Statistical bias correction for daily precipitation in regional climate models over Europe, Theoretical and Applied Climatology, 99, 187-192\n\n\\item O. Gutjahr and G. Heinemann (2013) Comparing precipitation bias correction methods for high-resolution regional climate simulations using COSMO-CLM, Theoretical and Applied Climatology, 114, 511-529\n}\n}\n\\seealso{\n\\code{\\link{biasCorrect}} for method used in bias correction. \n\\code{\\link{getBiasFactor}}, for the first part.\n}\n\n", + "created" : 1487955904024.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "1640022767", + "id" : "D22A91DA", + "lastKnownWriteTime" : 1488018205, + "last_content_update" : 1488018205, + "path" : "~/GitHub/hyfo/man/applyBiasFactor.Rd", + "project_path" : "man/applyBiasFactor.Rd", + "properties" : { + }, + "relative_order" : 21, + "source_on_save" : false, + "source_window" : "", + "type" : "r_doc" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/D3DE8C31 b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/D3DE8C31 new file mode 100644 index 0000000..4ef0544 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/D3DE8C31 @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "#' Extract period from list or dataframe.\n#' \n#' Extract common period or certain period from a list of different dataframes of time series, or from a \n#' dataframe. \n#' NOTE: all the dates in the datalist should follow the format in ?as.Date{base}.\n#' @param data A list of different dataframes of time series, or a dataframe with first column Date, the rest columns value.\n#' @param startDate A Date showing the start of the extract period, default as NULL, check details.\n#' @param endDate A Date showing the end of the extract period, default as NULL, check details.\n#' @param commonPeriod A boolean showing whether the common period is extracted. 
If chosen, startDate and endDate\n#' should be NULL.\n#' @param year extract certain year in the entire time series. if you want to extract year 2000, set \\code{year = 2000}\n#' @param month extract certain months in a year. e.g. if you want to extract Jan, Feb of each year, \n#' set \\code{month = c(1, 2)}.\n#' @details \n#' \\strong{startDate and endDate}\n#' \n#' If startDate and endDate are assigned, then certain period between startDate and endDate will be returned, \n#' for both datalist input and dataframe input.\n#' \n#' If startDate and endDate are NOT assigned, then,\n#' \n#' if input is a datalist, the startDate and endDate of the common period of different datalists will be assigned\n#' to the startDate and endDate.\n#' \n#' if input is a dataframe, the startDate and endDate of the input dataframe will be assigned to the startDate\n#' and endDate . Since different value columns share a common Date column in a dataframe input. \n#' \n#' \\strong{year and month}\n#' \n#' For year crossing month input, hyfo will take from the year before. E.g. if \\code{month = c(10, 11, 12, 1)},\n#' and \\code{year = 1999}, hyfo will take month 10, 11 and 12 from year 1998, and month 1 from 1999.You DO NOT \n#' have to set \\code{year = 1998 : 1999}.\n#' \n#' Well, if you set \\code{year = 1998 : 1999}, hyfo will take month 10, 11 and 12 from year 1997, and month 1 from 1998,\n#' then, take month 10, 11 and 12 from year 1998, month 1 from 1999. So you only have to care about the latter year.\n#' \n#' It is a generic function, if in your case you need to debug, please see \\code{?debug()} \n#' for how to debug S4 method.\n#' \n#' @return A list or a dataframe with all the time series inside containing the same period.\n#' @examples\n#' # Generate timeseries datalist. 
Each data frame consists of a Date and a value.\n#' \n#' AAA <- data.frame(\n#' # date column\n#' Date = seq(as.Date('1990-10-28'),as.Date('1997-4-1'),1),\n#' # value column\n#' AAA = sample(1:100,length(seq(as.Date('1990-10-28'),as.Date('1997-4-1'),1)), repl = TRUE))\n#' \n#' BBB <- data.frame(\n#' Date = seq(as.Date('1993-3-28'),as.Date('1999-1-1'),1), \n#' BBB = sample(1:100,length(seq(as.Date('1993-3-28'),as.Date('1999-1-1'),1)), repl = TRUE))\n#' \n#' CCC <- data.frame(\n#' Date = seq(as.Date('1988-2-2'),as.Date('1996-1-1'),1), \n#' CCC = sample(1:100,length(seq(as.Date('1988-2-2'),as.Date('1996-1-1'),1)), repl = TRUE)) \n#' \n#' list <- list(AAA, BBB, CCC)# dput() and dget() can be used to save and load list file.\n#' \n#' list_com <- extractPeriod(list, commonPeriod = TRUE)\n#' \n#' # list_com is the extracted datalist.\n#' str(list_com)\n#' \n#' # If startDate and endDate is provided, the record between them will be extracted.\n#' # make sure startDate is later than any startDate in each dataframe and endDate is \n#' # earlier than any endDate in each dataframe.\n#' \n#' data(testdl)\n#' datalist_com1 <- extractPeriod(testdl, startDate = '1994-1-1', endDate = '1995-10-1')\n#' \n#' \n#' dataframe <- list2Dataframe(datalist_com1)\n#' # now we have a dataframe to extract certain months and years.\n#' dataframe_new <- extractPeriod(dataframe, month = c(1,2,3))\n#' dataframe_new <- extractPeriod(dataframe, month = c(12,1,2), year = 1995)\n#' \n#' \n#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n#' \n#' @importFrom zoo as.Date\n#' @references \n#' \n#' \\itemize{\n#' \\item Achim Zeileis and Gabor Grothendieck (2005). zoo: S3 Infrastructure for Regular and Irregular Time\n#' Series. Journal of Statistical Software, 14(6), 1-27. 
URL https://www.jstatsoft.org/v14/i06/\n#' }\n#'\n#' @export\nsetGeneric('extractPeriod', function(data, startDate = NULL, endDate = NULL, commonPeriod = FALSE, \n year = NULL, month = NULL) {\n standardGeneric('extractPeriod')\n})\n\n\n#' @rdname extractPeriod\n#' @importFrom methods setMethod\nsetMethod('extractPeriod', signature('data.frame'),\n function(data, startDate, endDate, commonPeriod, year, month) {\n dataframe <- data\n dataset <- extractPeriod_dataframe(dataframe, startDate = startDate, endDate = endDate, year = year,\n month = month)\n return(dataset)\n \n})\n\n\n#' @rdname extractPeriod\n#' @importFrom methods setMethod\nsetMethod('extractPeriod', signature('list'),\n function(data, startDate, endDate, commonPeriod, year, month) {\n datalist <- data\n if (!is.null(startDate) & !is.null(endDate) & commonPeriod == FALSE) {\n dataset <- lapply(data, extractPeriod_dataframe, startDate = startDate, endDate = endDate, year = year,\n month = month)\n } else if (is.null(startDate) & is.null(endDate) & commonPeriod == TRUE) {\n \n Dates <- lapply(datalist, extractPeriod_getDate)\n # Here don't know why rbindlist cannot work, change back to do.call\n Dates <- do.call('rbind', Dates)\n \n startDate <- as.Date(max(Dates[, 1]))\n endDate <- as.Date(min(Dates[, 2]))\n \n dataset <- lapply(datalist, extractPeriod_dataframe, startDate = startDate, endDate = endDate, year = year,\n month = month)\n \n } else {\n stop('Enter startDate and endDate, set commonPeriod as False, or simply set commonPeriod as TRUE')\n }\n return(dataset)\n })\n\n\n\n\nextractPeriod_dataframe <- function(dataframe, startDate, endDate, year = NULL, month = NULL) {\n # to check whether first column is a date format\n if (!grepl('-|/', dataframe[1, 1])) {\n stop('First column is not date or Wrong Date formate, check the format in ?as.Date{base} \n and use as.Date to convert.')\n } \n dataframe[, 1] <- as.Date(dataframe[, 1])\n \n if (is.null(startDate)) startDate <- dataframe[1, 1]\n if 
(is.null(endDate)) endDate <- tail(dataframe[, 1], 1)\n \n startIndex <- which(dataframe[, 1] == startDate)\n endIndex <- which(dataframe[, 1] == endDate)\n if (length(startIndex) == 0 | length(endIndex) == 0) {\n stop('startDate and endDate exceeds the date limits in dataframe. Check datalsit please.')\n }\n output <- dataframe[startIndex:endIndex, ]\n \n \n if (!is.null(year)) {\n Date <- as.POSIXlt(output[, 1])\n yea <- Date$year + 1900\n mon <- Date$mon + 1\n \n if (is.null(month) || !any(sort(month) != month)) {\n DateIndex <- which(yea %in% year)\n if (length(DateIndex) == 0) stop('No input years in the input ts, check your input.')\n \n output <- output[DateIndex, ]\n \n # if year crossing than sort(month) != month, in this case we need to\n # take months from last year.\n } else {\n \n \n startIndex <- intersect(which(yea == year[1] - 1), which(mon == month[1]))[1]\n endIndex <- tail(intersect(which(yea == tail(year, 1)), which(mon == tail(month, 1))), 1)\n \n \n if (is.na(startIndex) || length(endIndex) == 0 || startIndex > endIndex) {\n stop('Cannot find input months and input years in the input time series.')\n }\n output <- output[startIndex:endIndex, ]\n \n if (any(diff(year) != 1)) {\n # if year is not continuous, like 1999, 2003, 2005, than we have to sift again. 
\n Date <- as.POSIXlt(output[, 1])\n yea <- Date$year + 1900\n mon <- Date$mon + 1\n \n DateIndex <- unlist(sapply(year, function(x) {\n startIndex <- intersect(which(yea == x - 1), which(mon == month[1]))[1]\n endIndex <- tail(intersect(which(yea == x), which(mon == tail(month, 1))), 1)\n index <- startIndex:endIndex\n return(index)\n }))\n \n \n output <- output[DateIndex, ]\n \n # cannot directly return output here, because sometimes, month can be incontinuous,\n # we still need the next process to sift month.\n }\n }\n \n }\n \n \n if (!is.null(month)) {\n Date <- as.POSIXlt(output[, 1])\n mon <- Date$mon + 1\n \n # %in% can deal with multiple equalities\n DateIndex <- which(mon %in% month)\n \n if (length(DateIndex) == 0) stop('No input months in the input ts, check your input.')\n \n output <- output[DateIndex, ]\n }\n \n \n return(output) \n }\n\n\n#' @importFrom utils tail\n#' @references \n#' \n#' \\itemize{\n#' \\item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for\n#' Statistical Computing, Vienna, Austria. 
URL https://www.R-project.org/.\n#' }\n#' \n#' \nextractPeriod_getDate <- function(dataset) {\n \n if (!grepl('-|/', dataset[1, 1])) {\n stop('First column is not date or Wrong Date formate, check the format in ?as.Date{base}, \n and use as.Date to convert.')\n }\n start <- as.Date(dataset[1, 1])\n end <- as.Date(tail(dataset[, 1], 1))\n \n \n return(c(start, end))\n }\n\n\n\n", + "created" : 1487522389624.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "760401439", + "id" : "D3DE8C31", + "lastKnownWriteTime" : 1488015924, + "last_content_update" : 1488015924894, + "path" : "~/GitHub/hyfo/R/extractPeriod(generic).R", + "project_path" : "R/extractPeriod(generic).R", + "properties" : { + }, + "relative_order" : 15, + "source_on_save" : false, + "source_window" : "", + "type" : "r_source" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/E1CE201C b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/E1CE201C new file mode 100644 index 0000000..81807b8 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/E1CE201C @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "# hyfo\n[![Travis-CI Build Status](https://travis-ci.org/Yuanchao-Xu/hyfo.svg?branch=master)](https://travis-ci.org/Yuanchao-Xu/hyfo)\n[![CRAN_Status_Badge](https://www.r-pkg.org/badges/version/hyfo)](https://cran.r-project.org/package=hyfo)\n\n## Installation\n\nReleased version from CRAN, for beginners and normal users:\n\n```R\ninstall.packages(\"hyfo\")\n```\n\nDevelopment version from github, for experienced users and those who are interested in investigating:\n\n```R\ninstall.packages(\"devtools\")\n# You can ignore the line above, if you have already installed devtools\ndevtools::install_github(\"Yuanchao-Xu/hyfo\")\n```\n\n**Official Website is [https://yuanchao-xu.github.io/hyfo](http://yuanchao-xu.github.io/hyfo)**\n\nhyfo is an R package, initially designed for the European Project EUPORIAS, and cooperated with DHI Denmark, which was then extended 
to other uses in hydrology, hydraulics and climate.\n\nThis package mainly focuses on data process and visulization in hydrology and climate forecasting. Main function includes NetCDF file processing, data extraction, data downscaling, data resampling, gap filler of precipitation, bias correction of forecasting data, flexible time series plot, and spatial map generation. It is a good pre-processing and post-processing tool for hydrological and hydraulic modellers.\n\n**If you feel hyfo is of a little help, please cite it as following:**\n\nXu, Yuanchao(2015). hyfo: Hydrology and Climate Forecasting R Package for Data Analysis and Visualization. Retrieved from http://yuanchao-xu.github.io/hyfo/\n\n\n\n\n", + "created" : 1488018138822.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "3806014512", + "id" : "E1CE201C", + "lastKnownWriteTime" : 1488018159, + "last_content_update" : 1488018159966, + "path" : "~/GitHub/hyfo/README.md", + "project_path" : "README.md", + "properties" : { + }, + "relative_order" : 55, + "source_on_save" : false, + "source_window" : "", + "type" : "markdown" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/E6AC5179 b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/E6AC5179 new file mode 100644 index 0000000..691fc9b --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/E6AC5179 @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "% Generated by roxygen2: do not edit by hand\n% Please edit documentation in R/resample(generic).R\n\\docType{methods}\n\\name{resample}\n\\alias{resample}\n\\alias{resample,data.frame-method}\n\\alias{resample,list-method}\n\\title{Resample your time series or ncdf files.}\n\\usage{\nresample(data, method)\n\n\\S4method{resample}{data.frame}(data, method)\n\n\\S4method{resample}{list}(data, method)\n}\n\\arguments{\n\\item{data}{a hyfo grid data or a time series, with first column date, and second column value. 
The date column should\nfollow the format in \\code{as.Date}, i.e. seperate with \"-\" or \"/\". Check details for more information.}\n\n\\item{method}{A string showing whether you want to change a daily data to monthly data or monthly\ndata to daily data.e.g. \"mon2day\" and \"day2mon\".}\n}\n\\value{\nconverted time series.\n}\n\\description{\nResameple your time series or ncdf files, more info pleae see details.\n}\n\\details{\nNote, when you want to change daily data to monthly data, a new date column will be generated,\nusually the date column will be the middle date of each month, 15th, or 16th. However, if your \ntime series doesn't start from the beginning of a month or ends to the end of a month, e.g. \nfrom 1999-3-14 to 2008-2-2, the first and last generated date could be wrong. Not only the date, but also the data, because you are \nnot calculating based on a intact month. \n\nIt is a generic function, if in your case you need to debug, please see \\code{?debug()} \nfor how to debug S4 method.\n}\n\\examples{\n# Daily to monthly\ndata(testdl)\nTS <- testdl[[2]] # Get daily data\nstr(TS)\nTS_new <- resample(TS, method = 'day2mon')\n\n# Monthly to daily\nTS <- data.frame(Date = seq(as.Date('1999-9-15'), length = 30, by = '1 month'), \nrunif(30, 3, 10))\nTS_new <- resample(TS, method = 'mon2day')\n\n#' # First load ncdf file.\nfilePath <- system.file(\"extdata\", \"tnc.nc\", package = \"hyfo\")\nvarname <- getNcdfVar(filePath) \nnc <- loadNcdf(filePath, varname)\n\nnc_new <- resample(nc, 'day2mon')\n\n\n# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n\n}\n\\references{\n\\itemize{\n\\item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for\nStatistical Computing, Vienna, Austria. 
URL https://www.R-project.org/.\n}\n}\n\n", + "created" : 1488014904885.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "4112848040", + "id" : "E6AC5179", + "lastKnownWriteTime" : 1488018205, + "last_content_update" : 1488018205, + "path" : "~/GitHub/hyfo/man/resample.Rd", + "project_path" : "man/resample.Rd", + "properties" : { + }, + "relative_order" : 46, + "source_on_save" : false, + "source_window" : "", + "type" : "r_doc" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/EA6E74D8 b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/EA6E74D8 new file mode 100644 index 0000000..6a2ef5a --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/EA6E74D8 @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "#' Convert a list to a dataframe.\n#' \n#' Convert a list of different time series to a dataframe. Usually the list is the output of\n#' \\code{extractPeriod}\n#' NOTE: Since it's dataframe, so the dataframes in the input datalist should have the same \n#' date, if not, please use \\code{extractPeriod} to process.\n#'\n#' @param datalist A list containing different time series, each sub list has to have the same length.\n#' @return The converted dataframe\n#' \n#' @examples\n#' # open file attached in the package.\n#' file <- system.file(\"extdata\", \"testdl.txt\", package = \"hyfo\")\n#' datalist <- dget(file) # read list file.\n#' datalist_new <- extractPeriod(datalist, commonPeriod = TRUE)\n#' \n#' dataframe <- list2Dataframe(datalist_new)\n#' \n#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n#' \n#' @export\nlist2Dataframe <- function(datalist) {\n \n data <- lapply(datalist, function(x) x[, 2:ncol(x)])\n names <- lapply(datalist, function(x) colnames(x)[2:ncol(x)])\n names <- do.call('cbind', names)\n Date <- datalist[[1]][, 1]\n data <- data.frame(data)\n colnames(data) <- names\n data <- data.frame(cbind(Date, data))\n \n return(data)\n}", + "created" : 
1488017839502.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "3317985071", + "id" : "EA6E74D8", + "lastKnownWriteTime" : 1488017860, + "last_content_update" : 1488017860413, + "path" : "~/GitHub/hyfo/R/list2dataframe.R", + "project_path" : "R/list2dataframe.R", + "properties" : { + }, + "relative_order" : 51, + "source_on_save" : false, + "source_window" : "", + "type" : "r_source" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/EB85B1DD b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/EB85B1DD new file mode 100644 index 0000000..ede9f38 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/EB85B1DD @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "% Generated by roxygen2: do not edit by hand\n% Please edit documentation in R/list2dataframe.R\n\\name{list2Dataframe}\n\\alias{list2Dataframe}\n\\title{Convert a list to a dataframe.}\n\\usage{\nlist2Dataframe(datalist)\n}\n\\arguments{\n\\item{datalist}{A list containing different time series, each sub list has to have the same length.}\n}\n\\value{\nThe converted dataframe\n}\n\\description{\nConvert a list of different time series to a dataframe. 
Usually the list is the output of\n\\code{extractPeriod}\nNOTE: Since it's dataframe, so the dataframes in the input datalist should have the same \ndate, if not, please use \\code{extractPeriod} to process.\n}\n\\examples{\n# open file attached in the package.\nfile <- system.file(\"extdata\", \"testdl.txt\", package = \"hyfo\")\ndatalist <- dget(file) # read list file.\ndatalist_new <- extractPeriod(datalist, commonPeriod = TRUE)\n\ndataframe <- list2Dataframe(datalist_new)\n\n# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n\n}\n\n", + "created" : 1487956425806.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "1268507476", + "id" : "EB85B1DD", + "lastKnownWriteTime" : 1488018205, + "last_content_update" : 1488018205, + "path" : "~/GitHub/hyfo/man/list2Dataframe.Rd", + "project_path" : "man/list2Dataframe.Rd", + "properties" : { + }, + "relative_order" : 42, + "source_on_save" : false, + "source_window" : "", + "type" : "r_doc" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/EBF4F7FE b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/EBF4F7FE new file mode 100644 index 0000000..75a2aac --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/EBF4F7FE @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "% Generated by roxygen2: do not edit by hand\n% Please edit documentation in R/getSpatialMap.R\n\\name{getSpatialMap_mat}\n\\alias{getSpatialMap_mat}\n\\title{Replot raster matrix}\n\\usage{\ngetSpatialMap_mat(matrix, title_d = NULL, catchment = NULL, point = NULL,\n output = \"data\", name = NULL, info = FALSE, scale = \"identity\",\n color = NULL, ...)\n}\n\\arguments{\n\\item{matrix}{A matrix raster, should be the result of \\code{getSpatialMap()}, output should be default\nor 'data'}\n\n\\item{title_d}{A string showing the title of the plot, defaut is NULL.}\n\n\\item{catchment}{A catchment file geting from \\code{shp2cat()} in the package, if a catchment is available for 
background.}\n\n\\item{point}{A dataframe, showing other information, e.g., location of the gauging stations. The \nthe data.frame should be with columes \"name, lon, lat, z, value\".}\n\n\\item{output}{A string showing the type of the output, if \\code{output = 'ggplot'}, the returned \ndata can be used in ggplot and \\code{getSpatialMap_comb()}; if \\code{output = 'plot'}, the returned data is the plot containing all \nlayers' information, and can be plot directly or used in grid.arrange; if not set, the raster matrix data\nwill be returned.}\n\n\\item{name}{If \\code{output = 'ggplot'}, name has to be assigned to your output, in order to differentiate\ndifferent outputs in the later multiplot using \\code{getSpatialMap_comb}.}\n\n\\item{info}{A boolean showing whether the information of the map, e.g., max, mean ..., default is FALSE.}\n\n\\item{scale}{A string showing the plot scale, 'identity' or 'sqrt'.}\n\n\\item{color}{Most of time you don't have to set this, but if you are not satisfied with the \ndefault color, you can set your own palette here. e.g., \\code{color = c('red', 'blue')}, then\nthe value from lowest to highest, will have the color from red to blue. More info about color,\nplease check ?palette().}\n\n\\item{...}{\\code{title, x, y} showing the title and x and y axis of the plot. e.g. 
\\code{title = 'aaa'}\ndefault is about precipitation.}\n}\n\\value{\nA matrix representing the raster map is returned, and the map is plotted.\n}\n\\description{\nreplot the matrix output from \\code{getSpatialMap}, when \\code{output = 'data'} or output is default\nvalue.\n}\n\\examples{\n\n\\dontrun{\ndata(tgridData)# the result of \\\\code{loadNcdf}\n#the output type of has to be default or 'data'.\na1 <- getSpatialMap(tgridData, method = 'mean')\na2 <- getSpatialMap(tgridData, method = 'max')\na3 <- getSpatialMap(tgridData, method = 'winter')\na4 <- getSpatialMap(tgridData, method = 'summer')\n#For example, if we want to investigate the difference between mean value and max.\n\na5 <- a2 - a1\ngetSpatialMap_mat(a4)\n\n#Or to investigate the difference between winter value and summer value.\na6 <- a3 - a4\ngetSpatialMap_mat(a6)\n\n}\n\n\n# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n\n}\n\\references{\n\\itemize{\n\\item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for\nStatistical Computing, Vienna, Austria. URL https://www.R-project.org/.\n\n\\item Hadley Wickham (2007). Reshaping Data with the reshape Package. Journal of Statistical Software,\n21(12), 1-20. URL http://www.jstatsoft.org/v21/i12/.\n\n\\item Hadley Wickham (2011). The Split-Apply-Combine Strategy for Data Analysis. Journal of Statistical\nSoftware, 40(1), 1-29. URL http://www.jstatsoft.org/v40/i01/.\n\n\\item Original S code by Richard A. Becker and Allan R. Wilks. R version by Ray Brownrigg. Enhancements\nby Thomas P Minka (2015). maps: Draw Geographical Maps. R package version\n2.3-11. https://CRAN.R-project.org/package=maps\n\n\\item Roger Bivand and Nicholas Lewin-Koh (2015). maptools: Tools for Reading and Handling Spatial\nObjects. R package version 0.8-36. https://CRAN.R-project.org/package=maptools\n\n\\item Roger Bivand and Colin Rundel (2015). rgeos: Interface to Geometry Engine - Open Source (GEOS). 
R\npackage version 0.3-11. https://CRAN.R-project.org/package=rgeos\n\n}\n}\n\n", + "created" : 1487956385696.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "644387789", + "id" : "EBF4F7FE", + "lastKnownWriteTime" : 1488018205, + "last_content_update" : 1488018205, + "path" : "~/GitHub/hyfo/man/getSpatialMap_mat.Rd", + "project_path" : "man/getSpatialMap_mat.Rd", + "properties" : { + }, + "relative_order" : 41, + "source_on_save" : false, + "source_window" : "", + "type" : "r_doc" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/EC7924C8 b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/EC7924C8 new file mode 100644 index 0000000..488d374 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/EC7924C8 @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "#' Collect data from csv for Anarbe case.\n#' \n#' Collect data from the gauging stations in spain, catchement Anarbe\n#' \n#' @param folderName A string showing the path of the folder holding different csv files.\n#' @param output A boolean showing whether the output is given, default is T.\n#' @return The collected data from different csv files.\n#' @examples\n#' \n#' #use internal data as an example.\n#' file <- system.file(\"extdata\", \"1999.csv\", package = \"hyfo\")\n#' folder <- strsplit(file, '1999')[[1]][1]\n#' a <- collectData_csv_anarbe(folder)\n#' \n#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n#' \n#' @references \n#' \n#' \\itemize{\n#' \\item http://meteo.navarra.es/estaciones/mapadeestaciones.cfm\n#' \\item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for\n#' Statistical Computing, Vienna, Austria. 
URL https://www.R-project.org/.\n#' }\n#' \n#' @source http://meteo.navarra.es/estaciones/mapadeestaciones.cfm\n#' @export\n#' @importFrom utils tail\n#' @importFrom data.table rbindlist\ncollectData_csv_anarbe <- function(folderName, output = TRUE){\n \n fileNames <- list.files(folderName, pattern='*.csv', full.names = TRUE)\n data <- lapply(fileNames, readColumn_csv_anarbe)\n data <- rbindlist(data)\n data <- data[, 1:2]\n # cus the special structure of data.tables, here should be data[[1]], instead of data[, 1]\n data[, 1] <- as.Date(data[[1]], format = '%d/%m/%Y')\n \n #newFileName <- file.choose(new = T)\n #write.table(data_new,file=newFileName,row.names = F, col.names = F,sep=',')\n a <- unlist(strsplit(folderName, '\\\\\\\\|/'))\n tarName <- tail(a, 2)[1]\n colnames(data) <- c('Date', tarName)\n \n if (output) return(data)\n}\n\n\nreadColumn_csv_anarbe <- function(fileName){\n data <- read.csv(fileName, skip = 4)\n endIndex <- which(data == '', arr.ind = TRUE)[1]-1\n \n data <- data[1:endIndex, ]\n \n if (!is.null(levels(data[, 2]))) {\n data[, 2] <- as.numeric(levels((data[, 2])))[data[, 2]]\n }\n \n colnames(data) <- c('Date', 'target')\n message(fileName)\n \n return(data)\n}\n\n\n\n#' Collect data from different excel files\n#' \n#' @param folderName A string showing the folder path.\n#' @param keyword A string showing the extracted column, e.g., waterLevel, waterBalance.\n#' @param output A boolean showing whether the output is given.\n#' @return The collected data from different excel files.\n#' @export\n#' @references \n#' \n#' \\itemize{\n#' \\item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for\n#' Statistical Computing, Vienna, Austria. 
URL http://www.R-project.org/.\n#' }\n# @importFrom utils write.table\ncollectData_excel_anarbe <- function(folderName, keyword = NULL, output = TRUE){\n \n message('In order to make \"hyfo\" easier to be installed, this part is commented,\n check original R file in your computer or go to \n https://github.com/Yuanchao-Xu/hyfo/blob/master/R/collectData_excel.R\n for ideas.')\n \n \n # newFileName <- file.choose(new = TRUE)\n # message ('new file should be located a different location than the excel folder, \n # in order to avoid error.\n # At least 2 excels should be in the folder\\n')\n # \n # message ('this function only applies to strange spain dem operation record file, and this strange file changes\n # its format in the middle of the record. For other applications, some tiny changes needs to be made.')\n # if (is.null(keyword)) stop('key word is needed, e.g.\"waterLevel\".')\n # \n # fileNames <- list.files(folderName, pattern = '*.xls', full.names = TRUE)\n # data <- lapply(fileNames, FUN = readColumn_excel_anarbe, keyword = keyword)\n # checkBind(data, 'rbind')\n # data <- do.call('rbind', data)\n # \n # data_new <- data.frame(data)\n # \n # data_new <- data_new[order(data_new[, 1]), ]\n # \n # \n # startDate <- data_new[1, 1]\n # endDate <- data_new[length(data_new[, 1]), 1]\n # \n # Date <- as.factor(seq(startDate, endDate, by = 1))\n # \n # if (length(Date) != length(data_new[, 1])) stop('check if the excel files are continuous')\n # \n # colnames(data_new) <- c('Date', keyword)\n # \n # write.table(data_new, file = newFileName,\n # row.names = FALSE, col.names = TRUE, sep = ',')\n # if(output == TRUE) return(data_new)\n}\n\n# \n# @importFrom xlsx read.xlsx\n# readTable_excel_anarbe <- function(fileName){\n# \n# index <- tail(strsplit(fileName, '\\\\.|\\\\ ')[[1]], 3)\n# raw_year <- index[1]\n# raw_mon <- index[2]\n# \n# raw <- read.xlsx(fileName, sheetName='A')\n# startRow <- which(raw == 'COTA', arr.ind = TRUE)[1]+4\n# startCol <- which(raw == 
'COTA',arr.ind = TRUE)[2]-1\n# stopRow <- which(raw =='TOTAL',arr.ind = TRUE)[1]-1\n# stopCol1 <- startCol + 17\n# stopCol2 <- which(raw == 'SUPERFICIE', arr.ind = TRUE)[2]\n# data <- cbind(raw[startRow:stopRow,startCol:stopCol1], raw[startRow:stopRow,stopCol2])\n# \n# \n# yearIndex <- rep(raw_year, stopRow-startRow+1)\n# monIndex <- rep(raw_mon, stopRow-startRow+1)\n# \n# data <- cbind(yearIndex, monIndex, data)\n# return(data)\n# }\n# # \n# @importFrom utils tail\n# readColumn_excel_anarbe <- function(fileName, keyword = NULL){\n# \n# index <- tail(strsplit(fileName, '\\\\.|\\\\ ')[[1]],3)\n# year <- as.numeric(index[1])\n# mon <- as.numeric(index[2])\n# \n# if (year == 99) {\n# year = year + 1900\n# } else year = year + 2000\n# \n# word = c('COTA', 'Cota\\n(m)', 'TOTAL', ' TOTAL')\n# \n# if (keyword == 'waterLevel') {\n# searchWord <- c('COTA', 'Cota\\n(m)')\n# } else if (keyword == 'discharge_ERE') {\n# searchWord <- c('AF.ERE-', 'Caudal\\n(m??/s)')\n# } else if (keyword == 'waterBalance') {\n# searchWord <- c('INCREMENTO', 'al Canal Bajo', 'AFORO',\n# 'Variaci??n\\nvolumen embalsado')\n# } else if (keyword == 'surfaceArea') {\n# searchWord <- c('SUPERFICIE', 'SUPERFICIE')\n# } else if (keyword == 'volume') {\n# searchWord <- c('EMBALSADO', 'Volumen\\n(m????)')\n# }\n# \n# \n# if (year == 1999 | year < 2009 | (year == 2009 & mon < 5)) {\n# raw <- xlsx::read.xlsx(fileName, sheetName = 'A')\n# startIndex <- which(raw == word[1], arr.ind = TRUE)\n# endIndex <- which(raw == word[3], arr.ind = TRUE)\n# startRow <- startIndex[1]+4\n# endRow <- endIndex[1]-1\n# \n# dayCol <- endIndex[2]\n# day <- raw[startRow:endRow, dayCol]\n# \n# targetCol <- which(raw == searchWord[1], arr.ind = TRUE)[2]\n# \n# if (is.na(targetCol)) stop(sprintf('capture nothing in %s', fileName))\n# \n# if (keyword == 'waterBalance') {\n# targetStart <- targetCol\n# targetEnd <- which(raw == searchWord[3], arr.ind = TRUE)[2]\n# a <- raw[startRow:endRow, targetStart:targetEnd]\n# a <- sapply(a, 
function(x) as.numeric(levels(x)[x]))\n# \n# if (year == 1999 & mon == 4) {\n# \n# target <- data.frame(a[, 2] * 86.4, a[, 5] * 86.4, rep(NA, dim(a)[1]), a[, 6] * 86.4,\n# a[, 4] * 86.4, a[, 11] * 86.4, a[, 3], a[, 7], rep(NA, dim(a)[1]), a[, 1])\n# } else {\n# target <- data.frame(a[, 2] * 86.4, a[, 5] * 86.4, a[, 6] * 86.4, a[, 7] * 86.4, \n# a[, 4] * 86.4, a[, 12] * 86.4, a[, 3], a[, 8], rep(NA, dim(a)[1]), a[, 1])\n# } \n# \n# } else {\n# target <- raw[startRow:endRow, targetCol]\n# if (keyword == 'discharge_ERE') target <- as.numeric(levels(target))[target]/1000\n# }\n# \n# } else {\n# raw <- read.xlsx(fileName,sheetName = 'parte del embalse')\n# startIndex <- which(raw == word[2], arr.ind = TRUE)\n# endIndex <- which(raw == word[4], arr.ind = TRUE)\n# startRow <- startIndex[1]+1\n# endRow <- endIndex[1]-2\n# \n# dayCol <- endIndex[2]\n# day <- raw[startRow:endRow, dayCol]\n# targetCol <- which(raw == searchWord[2], arr.ind=TRUE)[2]\n# if (is.na(targetCol)) stop(sprintf('capture nothing in %s', fileName))\n# \n# if (keyword == 'waterBalance') {\n# targetStart <- targetCol\n# targetEnd <- which(raw == searchWord[4], arr.ind=TRUE)[2]\n# target <- raw[startRow:endRow, targetStart:targetEnd]\n# \n# } else {\n# target <- raw[startRow:endRow, targetCol]\n# }\n# \n# }\n# \n# \n# startDate <- as.Date(paste(year, mon, day[1], sep = '-'))\n# endDate <- as.Date(paste(year, mon, tail(day,1), sep = '-'))\n# \n# Date <- seq(startDate, endDate, 1)\n# output <- data.frame(Date, as.vector(target))\n# colnames(output) <- c('Date', seq(1, dim(output)[2] - 1))\n# message(fileName) \n# return(output)\n# \n# }\n# \n\n\n\n\n\n#' collect data from different txt.\n#' \n#' @param folderName A string showing the folder path.\n#' @param output A boolean showing whether the result is given.\n#' @param rangeWord A list containing the keyword and the shift. 
\n#' defaut is set to be used in spain gauging station.\n#' @examples\n#' \n#' #use internal data as an example.\n#' \n#' \\dontrun{\n#' file <- system.file(\"extdata\", \"1999.csv\", package = \"hyfo\")\n#' folder <- strsplit(file, '1999')[[1]][1]\n#' a <- collectData_txt_anarbe(folder)\n#' }\n#'\n#' \n#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n#' \n#' @references \n#' \n#' \\itemize{\n#' \\item http://www4.gipuzkoa.net/oohh/web/esp/02.asp\n#' \\item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for\n#' Statistical Computing, Vienna, Austria. URL https://www.R-project.org/.\n#' }\n#' \n#' \n#' @source http://www4.gipuzkoa.net/oohh/web/esp/02.asp\n#' @return The collected data from different txt files.\n#' @export\n#' @importFrom utils tail\n#' @importFrom data.table rbindlist\ncollectData_txt_anarbe <- function(folderName, output = TRUE, rangeWord = c('Ene ', -1, \n 'Total ', -6)){\n #All the code should be ASCII encode, so there should be no strange symbol.\n if (is.null(rangeWord)) {\n stop('rangeWord consists of 4 elements:\n 1. start word which program can recognise.\n 2. shift1, the shift needs to be made. E.g. start word is in line 7, and program\n should read file from line 9, then shift is 9-7 = 2.\n 3. end word, as start word\n 4. shift2, same as shift1, sometimes can be negative\n \n E.g. 
rangeWord=c(\\\"aaa\\\",2,\\\"bbb\\\",-2)\n if no rangeWord, just input c(NULL,NULL,NULL,NULL)')\n \n }\n \n \n fileNames <- list.files(folderName, pattern = '*.TXT', full.names = TRUE)\n \n data <- lapply(fileNames, FUN = readColumn_txt_anarbe, rangeWord = rangeWord)\n \n data <- rbindlist(data)\n \n a <- unlist(strsplit(folderName, '\\\\\\\\|/'))\n tarName <- tail(a, 2)[1]\n colnames(data) <- c('Date', tarName)\n \n #newFileName <- file.choose(new = T)\n message('new file should be located a different location than the excel folder,\n in order to avoid error.\n At least 2 excels should be in the folder')\n \n #write.table(data_new,file=newFileName,row.names = F, col.names = F,sep=',')\n \n \n if (output == TRUE) return(data)\n \n} \n\n\n\nanarbe_txt <- function(dataset, x1, x2){\n \n data <- as.matrix(dataset[x1:x2, 2:13])\n startYear <- data[1, 6]\n \n data <- data[5:35, ]\n \n date <- which(data != ' ', arr.ind = TRUE)\n startDate <- date[1, ]\n \n endDate <- date[length(date[, 1]), ]\n \n startDate <- as.Date(paste(startYear, startDate[2], startDate[1], sep = '-'))\n endDate <- as.Date(paste(startYear, endDate[2], endDate[1], sep = '-'))\n \n Date <- as.factor(seq(startDate, endDate, 1))\n \n dim(data) <- c(length(data), 1)\n \n data <- as.numeric(data[which(data != ' '), ])\n \n if (length(data) != length(Date)) {\n stop('check original txt file. for missing value, the symbol is \"--\", check\n if this symbol is missing somewhere')\n }\n \n output <- data.frame(Date = Date, target = data)\n \n return(output)\n }\n\n\n#' @references \n#' \n#' \\itemize{\n#' \\item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for\n#' Statistical Computing, Vienna, Austria. 
URL https://www.R-project.org/.\n#' }\n#' \n#' @importFrom utils read.fwf\nreadColumn_txt_anarbe <- function(fileName, keyword = NULL, rangeWord = NULL){\n \n a <- read.fwf(fileName, widths = rep(10,13))#read file with fixed width\n \n startRow <- which(a == rangeWord[1], arr.ind = TRUE)[, 1]\n startRow <- startRow + as.numeric(rangeWord[2])\n \n endRow <- which(a == rangeWord[3], arr.ind = TRUE)[, 1]\n endRow <- endRow + as.numeric(rangeWord[4])\n \n data <- mapply(FUN = function(x1, x2) anarbe_txt(dataset = a, x1, x2), startRow, endRow)\n \n data_new <- data.frame(Data = unlist(data[1, ]), target = unlist(data[2, ]))\n message(fileName)\n return(data_new)\n}\n", + "created" : 1487522240065.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "2673469439", + "id" : "EC7924C8", + "lastKnownWriteTime" : 1488015187, + "last_content_update" : 1488015188001, + "path" : "~/GitHub/hyfo/R/case_anarbe.R", + "project_path" : "R/case_anarbe.R", + "properties" : { + }, + "relative_order" : 13, + "source_on_save" : false, + "source_window" : "", + "type" : "r_source" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/EEC7BFEB b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/EEC7BFEB new file mode 100644 index 0000000..09a4c0d --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/EEC7BFEB @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "Package: hyfo\nType: Package\nTitle: Hydrology and Climate Forecasting\nVersion: 1.3.9\nDate: 2017-2-20\nAuthors@R: person(\"Yuanchao\", \"Xu\", email = \"xuyuanchao37@gmail.com\",\n role = c(\"aut\", \"cre\"))\nDescription: Focuses on data processing and visualization in hydrology and\n climate forecasting. Main function includes data extraction, data downscaling,\n data resampling, gap filler of precipitation, bias correction of forecasting\n data, flexible time series plot, and spatial map generation. 
It is a good pre-\n processing and post-processing tool for hydrological and hydraulic modellers.\nLicense: GPL-2\nDepends:\n R (>= 3.1.0),\n stats (>= 3.1.3),\n utils(>= 3.1.3),\nImports:\n ggplot2 (>= 1.0.1),\n reshape2 (>= 1.4.1),\n zoo (>= 1.7-12),\n rgdal (>= 0.8-16),\n plyr (>= 1.8.3),\n moments (>= 0.14),\n lmom (>= 2.5),\n maps(>= 2.3-9),\n maptools (>= 0.8-36),\n rgeos (>= 0.3-8),\n ncdf4 (>= 1.14.1),\n MASS (>= 7.3-39),\n methods,\n data.table\nSuggests:\n gridExtra,\n knitr,\n rmarkdown\nVignetteBuilder: knitr\nLazyData: true\nURL: https://yuanchao-xu.github.io/hyfo/\nBugReports: https://github.com/Yuanchao-Xu/hyfo/issues\nRepository: CRAN\nRoxygenNote: 5.0.1\n", + "created" : 1483876827452.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "844478590", + "id" : "EEC7BFEB", + "lastKnownWriteTime" : 1487955768, + "last_content_update" : 1487955768902, + "path" : "~/GitHub/hyfo/DESCRIPTION", + "project_path" : "DESCRIPTION", + "properties" : { + }, + "relative_order" : 8, + "source_on_save" : false, + "source_window" : "", + "type" : "dcf" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/EF2B4E b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/EF2B4E new file mode 100644 index 0000000..0def84e --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/EF2B4E @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "#' Get variable name of the NetCDF file.\n#' \n#' Get variable name in the NetCDF file. 
After knowning the name, you can use \\code{loadNcdf} to load\n#' the target variable.\n#' \n#' @param filePath A path pointing to the netCDF file.\n#' @return The names of the varialbes in the file.\n#' @examples \n#' # First open the test NETcDF file.\n#' filePath <- system.file(\"extdata\", \"tnc.nc\", package = \"hyfo\")\n#' \n#' # Then if you don't know the variable name, you can use \\code{getNcdfVar} to get variable name\n#' varname <- getNcdfVar(filePath)\n#' \n#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n#' \n#' @import ncdf4\n#' @references \n#' \n#' \\itemize{\n#' \\item David Pierce (2015). ncdf4: Interface to Unidata netCDF (Version 4 or\n#' Earlier) Format Data Files. R package version 1.14.1.\n#' https://CRAN.R-project.org/package=ncdf4\n#' }\n#' \n#' \n#' \n#' @export\ngetNcdfVar <- function(filePath) {\n nc <- nc_open(filePath)\n names <- names(nc$var)\n return(names)\n}\n\n\n#' Load NetCDF file\n#' \n#' @param filePath A path pointing to the NetCDF file, version3.\n#' @param varname A character representing the variable name, you can use \\code{getNcdfVar} to\n#' get the basic information about the variables and select the target.\n#' @param tz A string representing the time zone, default is GMT, if you know what time zone is \n#' you can assign it in the argument. If \\code{tz = ''}, current time zone will be taken.\n# @param drop When the time dimension only have one value, the output data will drop\n# this dimension automatically (\\code{drop = TRUE}), default value is \\code{drop = FALSE}, then time dimension will be added.\n# This argument mainly applies to the later calculations based on hyfo file. If the dimension\n# is dropped, than some calculations may not be processed afterwards. \n#' @param ... 
Several arguments including Year, month, lon, lat \n#' type in \\code{?downscaleNcdf} for details.You can load while downscale, \n#' and also first load than use \\code{downscaleNcdf} to downscale.\n#' @return A list object from \\code{hyfo} containing the information to be used in the analysis, \n#' or biascorrection.\n#' @examples \n#' # First open the test NETcDF file.\n#' filePath <- system.file(\"extdata\", \"tnc.nc\", package = \"hyfo\")\n#' \n#' # Then if you don't know the variable name, you can use \\code{getNcdfVar} to get variable name\n#' varname <- getNcdfVar(filePath)\n#' \n#' nc <- loadNcdf(filePath, varname)\n#' \n#' # you can directly add your downscale information to the argument.\n#' nc1 <- loadNcdf(filePath, varname, year = 2006, lon = c(-2, -0.5), lat = c(43.2, 43.7))\n#' nc2 <- loadNcdf(filePath, varname, year = 2005, month = 3:8, lon = c(-2, -0.5), \n#' lat = c(43.2, 43.7))\n#' \n#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/\n#' \n#' @export\n#' @import ncdf4\n#' @references \n#' \n#' \\itemize{\n#' \\item David Pierce (2015). ncdf4: Interface to Unidata netCDF (Version 4 or\n#' Earlier) Format Data Files. R package version 1.14.1.\n#' https://CRAN.R-project.org/package=ncdf4\n#' \n#' \\item Santander MetGroup (2015). ecomsUDG.Raccess: R interface to the ECOMS User Data Gateway. R package\n#' version 2.2-6. http://meteo.unican.es/ecoms-udg\n#' }\n#' \n#' \nloadNcdf <- function(filePath, varname, tz = 'GMT', ...) 
{\n nc <- nc_open(filePath)\n \n var <- nc$var\n # Use name to locate the variable\n call_1 <- as.call(c(\n list(as.name('$'), var, varname)\n ))\n var <- eval(call_1)\n if(is.null(var)) stop('No such variable name, check source file.')\n \n dimNames <- unlist(lapply(1:length(var$dim), function(x) var$dim[[x]]$name))\n \n # Only deals with the most common dimensions, futher dimensions will be added in future.\n dimIndex <- grepAndMatch(c('lon', 'lat', 'time', 'member'), dimNames)\n if (length(dimIndex) < 3) stop('Your file has less than 3 dimensions.')\n \n # First needs to identify the variable name, load the right data\n message('Loading data...')\n nc_data <- ncvar_get(nc, var)\n message('Processing...')\n \n gridData <- list()\n gridData$Variable$varName <- varname\n gridData$xyCoords$x <- var$dim[[dimIndex[1]]]$vals\n attributes(gridData$xyCoords$x)$name <- dimNames[dimIndex[1]]\n \n gridData$xyCoords$y <- var$dim[[dimIndex[2]]]$vals\n attributes(gridData$xyCoords$y)$name <- dimNames[dimIndex[2]]\n \n # Time part needs to be taken seperately\n \n timeUnit <- strsplit(var$dim[[dimIndex[3]]]$units, split = ' since')[[1]][1]\n timeDiff <- var$dim[[dimIndex[3]]]$vals\n # To get real time, time since when has to be grabbed from the dataset.\n timeSince <- as.POSIXlt(strsplit(var$dim[[dimIndex[3]]]$units, split = 'since')[[1]][2], tz = tz)\n \n \n# Date <- rep(timeSince, length(timeDiff))\n \n \n unitDic <- data.frame(weeks = 'weeks', days = 'days', hours = 'hours',\n minutes = 'mins', seconds = 'secs')\n \n timeDiff <- as.difftime(timeDiff, units = as.character(unitDic[1, timeUnit]))\n \n# if (grepl('day', timeUnit)) {\n# Date$mday <- Date$mday + timeDiff\n# } else if (grepl('second', timeUnit)) {\n# Date$sec <- Date$sec + timeDiff\n# }\n Date <- timeSince + timeDiff\n \n # data directly loaded from ncdf4 will drop the dimension with only one value.\n # the varsize shows the real dimension, without any dropping.\n dim(nc_data) <- var$varsize \n \n # Right now there 
is no need to add end Date, in furture, may be added as needed.\n gridData$Dates$start <- as.character(Date)\n \n # Assing data to grid data\n # At leaset should be 3 dimensions, lon, lat, time. So if less than 3, \n \n gridData$Data <- nc_data\n \n attributes(gridData$Data)$dimensions <- dimNames\n \n if (!is.na(dimIndex[4])) gridData$Members <- var$dim[[dimIndex[4]]]$vals\n \n gridData$Loaded <- 'by hyfo package, https://yuanchao-xu.github.io/hyfo/'\n nc_close(nc)\n \n output <- downscaleNcdf(gridData, ...)\n \n return(output)\n \n}\n\n\n\n\n#' Downscale NetCDF file\n#' @param gridData A hyfo list file from \\code{\\link{loadNcdf}}\n#' @param year A vector of the target year. e.g. \\code{year = 2000}, \\code{year = 1980:2000}\n#' @param month A vector of the target month. e.g. \\code{month = 2}, \\code{month = 3:12}\n#' @param lon A vector of the range of the downscaled longitude, should contain a max value\n#' and a min value. e.g. \\code{lon = c(-1.5, 2,5)}\n#' @param lat A vector of the range of the downscaled latitude, should contain a max value\n#' and a min value. e.g. \\code{lat = c(32,2, 36)}\n#' @return A downscaled hyfo list file.\n#' @examples \n#' # First open the test NETcDF file.\n#' filePath <- system.file(\"extdata\", \"tnc.nc\", package = \"hyfo\")\n#' \n#' \n#' # Then if you don't know the variable name, you can use \\code{getNcdfVar} to get variable name\n#' varname <- getNcdfVar(filePath)\n#' \n#' nc <- loadNcdf(filePath, varname)\n#' \n#' # Then write to your work directory\n#' \n#' nc1 <- downscaleNcdf(nc, year = 2006, lon = c(-2, -0.5), lat = c(43.2, 43.7))\n#' nc2 <- downscaleNcdf(nc, year = 2005, month = 3:8, lon = c(-2, -0.5), lat = c(43.2, 43.7))\n#' \n#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n#' \n#' @export \n#' @references \n#' \n#' \\itemize{\n#' \n#' \\item Santander MetGroup (2015). ecomsUDG.Raccess: R interface to the ECOMS User Data Gateway. R package\n#' version 2.2-6. 
http://meteo.unican.es/ecoms-udg\n#' }\n#' \n#' \ndownscaleNcdf <- function(gridData, year = NULL, month = NULL, lon = NULL, lat = NULL) {\n \n \n if (!is.null(year)) {\n Dates <- as.POSIXlt(gridData$Dates$start)\n yearIndex <- Dates$year + 1900\n monIndex <- Dates$mon + 1\n timeDim <- match('time', attributes(gridData$Data)$dimensions)\n \n \n if (is.null(month) || !any(sort(month) != month)) {\n targetYearIndex <- which(yearIndex %in% year)\n if (length(targetYearIndex) == 0) stop('No input years in the input ts, check your input.')\n \n \n # if year crossing than sort(month) != month\n } else {\n \n startIndex <- intersect(which(yearIndex == year[1] - 1), which(monIndex == month[1]))[1]\n endIndex <- tail(intersect(which(yearIndex == tail(year, 1)), which(monIndex == tail(month, 1))), 1)\n \n if (is.na(startIndex) || length(endIndex) == 0 || startIndex > endIndex) {\n stop('Cannot find input months and input years in the input time series.')\n } else {\n \n targetYearIndex <- startIndex:endIndex\n \n if (any(diff(year) != 1)) {\n # if year is not continuous, like 1999, 2003, 2005, than we have to sift again.\n # Only for special cases.\n Dates <- Dates[targetYearIndex]\n yea <- Dates$year + 1900\n mon <- Dates$mon + 1\n \n DateIndex <- unlist(sapply(year, function(x) {\n startIndex <- intersect(which(yea == x - 1), which(mon == month[1]))[1]\n endIndex <- tail(intersect(which(yea == x), which(mon == tail(month, 1))), 1)\n index <- startIndex:endIndex\n return(index)\n }))\n \n \n targetYearIndex <- targetYearIndex[DateIndex]\n # cannot directly return output here, because sometimes, month can be incontinuous,\n # we still need the next process to sift month.\n }\n }\n }\n \n gridData$Dates$start <- gridData$Dates$start[targetYearIndex]\n gridData$Dates$end <- gridData$Dates$end[targetYearIndex]\n \n gridData$Data <- chooseDim(gridData$Data, timeDim, targetYearIndex)\n } \n \n if (!is.null(month)) {\n Dates <- as.POSIXlt(gridData$Dates$start)\n monIndex <- 
Dates$mon + 1\n \n targetMonIndex <- which(monIndex %in% month)\n if (length(targetMonIndex) == 0) stop('Check your input year, it may exceed the years \n in the input dataset.')\n gridData$Dates$start <- gridData$Dates$start[targetMonIndex]\n gridData$Dates$end <- gridData$Dates$end[targetMonIndex]\n \n timeDim <- match('time', attributes(gridData$Data)$dimensions)\n \n gridData$Data <- chooseDim(gridData$Data, timeDim, targetMonIndex)\n \n }\n \n if (!is.null(lon)) {\n \n lonIndex <- gridData$xyCoords$x\n \n lonI1 <- which(abs(lonIndex - min(lon)) == min(abs(lonIndex - min(lon)), na.rm = TRUE)) \n lonI2 <- which(abs(lonIndex - max(lon)) == min(abs(lonIndex - max(lon)), na.rm = TRUE)) \n \n # take the as large as possible range\n targetLonIndex <- lonI1[length(lonI1)]:lonI2[length(lonI2)]\n if (length(targetLonIndex) == 0) stop('Your input lon is too small, try to expand the \n longitude range.') \n gridData$xyCoords$x <- gridData$xyCoords$x[targetLonIndex]\n lonDim <- grepAndMatch('lon', attributes(gridData$Data)$dimensions)\n \n gridData$Data <- chooseDim(gridData$Data, lonDim, targetLonIndex)\n }\n \n \n if (!is.null(lat)) {\n latIndex <- gridData$xyCoords$y\n \n latI1 <- which(abs(latIndex - min(lat)) == min(abs(latIndex - min(lat)), na.rm = TRUE)) \n latI2 <- which(abs(latIndex - max(lat)) == min(abs(latIndex - max(lat)), na.rm = TRUE)) \n \n targetLatIndex <- latI1[length(latI1)]:latI2[length(latI2)]\n \n if (length(targetLonIndex) == 0) stop('Your input lat is too small, try to expand the \n latitude range.') \n gridData$xyCoords$y <- gridData$xyCoords$y[targetLatIndex]\n latDim <- grepAndMatch('lat', attributes(gridData$Data)$dimensions)\n gridData$Data <- chooseDim(gridData$Data, latDim, targetLatIndex)\n }\n \n return(gridData)\n \n}\n\n\n\n\n\n\n\n\n\n\n#' Write to NetCDF file using hyfo list file\n#' @param gridData A hyfo list file from \\code{\\link{loadNcdf}}\n#' @param filePath A path of the new NetCDF file, should end with \".nc\"\n#' @param 
missingValue A number representing the missing value in the NetCDF file, default\n#' is 1e20\n#' #' @param tz A string representing the time zone, default is GMT, if you know what time zone is \n#' you can assign it in the argument. If \\code{tz = ''}, current time zone will be taken.\n#' @param units A string showing in which unit you are putting in the NetCDF file, it can be \n#' seconds or days and so on. If not specified, the function will pick up the possible largest \n#' time units from \\code{c('weeks', 'days', 'hours', 'mins', 'secs')}\n#' @param version ncdf file versions, default is 3, if 4 is chosen, output file will be foreced to version 4.\n#' @param tz time zone, default is \"GMT\"\n#' @return An NetCDF version 3 file.\n#' @examples \n#' # First open the test NETcDF file.\n#' filePath <- system.file(\"extdata\", \"tnc.nc\", package = \"hyfo\")\n#' \n#' \n#' # Then if you don't know the variable name, you can use \\code{getNcdfVar} to get variable name\n#' varname <- getNcdfVar(filePath)\n#' \n#' nc <- loadNcdf(filePath, varname)\n#' \n#' # Then write to your work directory\n#' \n#' writeNcdf(nc, 'test.nc')\n#' \n#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n#' \n#' @export \n#' @import ncdf4\n#' @references \n#' \n#' \\itemize{\n#' \\item David Pierce (2015). ncdf4: Interface to Unidata netCDF (Version 4 or\n#' Earlier) Format Data Files. R package version 1.14.1.\n#' https://CRAN.R-project.org/package=ncdf4\n#' \n#' \\item Santander MetGroup (2015). ecomsUDG.Raccess: R interface to the ECOMS User Data Gateway. R package\n#' version 2.2-6. 
http://meteo.unican.es/ecoms-udg\n#' \n#' }\n#' \n#' \nwriteNcdf <- function(gridData, filePath, missingValue = 1e20, tz = 'GMT', units = NULL, version = 3) {\n \n name <- gridData$Variable$varName\n # First defines dimensions.\n lonName <- attributes(gridData$xyCoords$x)$name\n latName <- attributes(gridData$xyCoords$y)$name\n dimLon <- ncdim_def(lonName, 'degree', gridData$xyCoords$x)\n dimLat <- ncdim_def(latName, 'degree', gridData$xyCoords$y)\n dimMem <- NULL\n if (!is.null(gridData$Members)) {\n dimMem <- ncdim_def('member', 'members', 1:length(gridData$Members))\n }\n \n \n # Time needs to be treated seperately\n dates <- as.POSIXlt(gridData$Dates$start, tz = tz)\n if (is.null(units)) {\n units <- getTimeUnit(dates)\n time <- difftime(dates, dates[1], units = units)\n } else {\n time <- difftime(dates, dates[1], units = units)\n }\n timeUnits <- paste(units, 'since', dates[1])\n # Here time needs to be numeric, as required by ncdf4 package, which is not the same\n # with ncdf\n dimTime <- ncdim_def('time', timeUnits, as.numeric(time))\n \n \n # Depending on whether there is a member part of the dataset.\n # default list\n dimList <- list(dimLon, dimLat, dimTime, dimMem)\n \n # In order to keep the dim list exactly the same with the original one, it needs to be changed.\n dimIndex <- grepAndMatch(c('lon', 'lat', 'time', 'member'), attributes(gridData$Data)$dimensions)\n dimIndex <- na.omit(dimIndex)\n \n # Here order is needed, cuz in the procesure above, c('lon', 'lat', 'time', 'member')\n # is the pattern, while actually, attributes(gridData$Data)$dimensions should be the pattern.\n # So here needs an order() to get the wanted result.\n dimList <- dimList[order(dimIndex)]\n \n # delete the NULL list, in order that there is no member part in the data.\n dimList <- Filter(Negate(is.null), dimList)\n # Then difines data\n var <- ncvar_def( name, \"units\", dimList, missingValue)\n \n \n # Here for ncdf4, there is an option to create version 4 ncdf, in future, 
it\n # may added here.\n if (version == 3) {\n nc <- nc_create(filePath, var) \n } else if (version == 4) {\n nc <- nc_create(filePath, var, force_v4 = TRUE)\n } else {\n stop(\"Which ncdf version you want? Only 3 and 4 can be selected!\")\n }\n \n # This part comes from the library downscaleR, can be deleted if you don't \n # use {ecomsUDG.Raccess}, by adding this, the file can be read by the package {ecomsUDG.Raccess}\n ncatt_put(nc, \"time\", \"standard_name\",\"time\")\n ncatt_put(nc, \"time\", \"axis\",\"T\")\n ncatt_put(nc, \"time\", \"_CoordinateAxisType\",\"Time\")\n #ncatt_put(nc, \"time\", \"_ChunkSize\",1)\n ncatt_put(nc, lonName, \"standard_name\",\"longitude\")\n ncatt_put(nc, lonName, \"_CoordinateAxisType\",\"Lon\")\n ncatt_put(nc, latName, \"standard_name\",\"latitude\")\n ncatt_put(nc, latName, \"_CoordinateAxisType\",\"Lat\")\n if (!is.null(dimMem)){\n ncatt_put(nc, \"member\", \"standard_name\",\"realization\")\n ncatt_put(nc, \"member\", \"_CoordinateAxisType\",\"Ensemble\")\n #att.put.ncdf(nc, \"member\", \"ref\",\"http://www.uncertml.org/samples/realisation\")\n }\n \n \n # This part has to be put\n ncatt_put(nc, 0, \"Conventions\",\"CF-1.4\")\n ncatt_put(nc, 0, 'WrittenBy', 'hyfo(https://yuanchao-xu.github.io/hyfo/)')\n \n #data <- aperm(gridData$Data, dimIndex) no need to do this, in the process above\n # when you define the dimlist, the order of the dimension was fixed.\n data <- gridData$Data\n ncvar_put(nc, name, data)\n nc_close(nc)\n \n}\n\n# For internaluse by writeNcdf\ngetTimeUnit <- function(dates) {\n units <- c('weeks', 'days', 'hours', 'mins', 'secs')\n output <- NULL\n for (unit in units) {\n time <- difftime(dates, dates[1], units = unit)\n rem <- sapply(time, function(x) x%%1)\n if (!any(rem != 0)) {\n output <- unit\n break\n }\n } \n return(output)\n}\n\n\n# Save for future use. \n#' @import ncdf4\n#' @references \n#' David Pierce (2014). ncdf: Interface to Unidata netCDF data files. 
R package version 1.6.8.\n#' https://CRAN.R-project.org/package=ncdf\ngetExtralDim <- function(...) {\n dimList <- list(...)\n \n \n}\n\n# in order to first grep than match.\n# match only provides for exactly match, \n# dimIndex <- grepAndMatch(c('lon', 'lat', 'time', 'member'), dimNames)\ngrepAndMatch <- function(x, table) {\n index <- unlist(lapply(x, function(x) {\n a <- grep(x, table)\n }))\n return(index)\n}", + "created" : 1483880947871.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "4267879418", + "id" : "EF2B4E", + "lastKnownWriteTime" : 1488018076, + "last_content_update" : 1488018076214, + "path" : "~/GitHub/hyfo/R/ncdf.R", + "project_path" : "R/ncdf.R", + "properties" : { + }, + "relative_order" : 10, + "source_on_save" : false, + "source_window" : "", + "type" : "r_source" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/F28DEBD3 b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/F28DEBD3 new file mode 100644 index 0000000..75f7e4a --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/F28DEBD3 @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "#' Get spatial map of the input dataset.\n#' \n#' @param dataset A list containing different information, should be the result of reading netcdf file using\n#' \\code{loadNcdf}.\n#' @param method A string showing different calculating method for the map. More information please refer to\n#' details.\n#' @param member A number showing which member is selected to get, if the dataset has a \"member\" dimension. Default\n#' is NULL, if no member assigned, and there is a \"member\" in dimensions, the mean value of the members will be\n#' taken.\n#' @param ... 
several arguments including x, y, title, catchment, point, output, name, info, scale, color, \n#' type in \\code{?getSpatialMap_mat} for details.\n#' @return A matrix representing the raster map is returned, and the map is plotted.\n#' @details\n#' There are following methods to be selected, \n#' \"meanAnnual\": annual rainfall of each year is plotted. \n#' \"winter\", \"spring\", \"autumn\", \"summer\": MEAN seasonal rainfall of each year is plotted.\n#' Month(number 1 to 12): MEAN month rainfall of each year is plotted, e.g. MEAN march rainfall of each year.\n#' \"mean\", \"max\", \"min\": mean daily, maximum daily, minimum daily precipitation.\n#' @examples\n#' \n#' \n#' \\dontrun{\n#' #gridData provided in the package is the result of \\code {loadNcdf}\n#' data(tgridData)\n#' getSpatialMap(tgridData, method = 'meanAnnual')\n#' getSpatialMap(tgridData, method = 'winter')\n#' \n#' \n#' getSpatialMap(tgridData, method = 'winter', catchment = testCat)\n#' \n#' file <- system.file(\"extdata\", \"point.txt\", package = \"hyfo\")\n#' point <- read.table(file, header = TRUE, sep = ',' )\n#' getSpatialMap(tgridData, method = 'winter', catchment = testCat, point = point)\n#' }\n#' \n#' \n#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/\n#' \n#' @export\ngetSpatialMap <- function(dataset, method = NULL, member = 'mean', ...) {\n\n #check input dataset\n checkHyfo(dataset)\n \n #range of the dataset just loaded \n lon <- dataset$xyCoords$x\n lat <- dataset$xyCoords$y\n startTime <- as.POSIXlt(dataset$Dates$start, tz = 'GMT')\n yearIndex <- startTime$year + 1900\n monthIndex <-startTime$mon + 1\n data <- dataset$Data\n \n # Dimension needs to be arranged. 
Make sure first and second dimension is lat and lon.\n data <- adjustDim(data, ref = c('lon', 'lat', 'time'))\n \n # Because in the following part, only 3 dimensions are allowed, so data has to be processed.\n if (member == 'mean' & any(attributes(data)$dimensions == 'member')) {\n dimIndex3 <- which(attributes(data)$dimensions != 'member')\n data <- apply(data, MARGIN = dimIndex3, FUN = mean, na.rm = TRUE)\n message('Mean value of the members are returned.')\n \n } else if (member != 'mean' & any(attributes(data)$dimensions == 'member')) {\n dimIndex3 <- which(attributes(data)$dimensions == 'member')\n data <- chooseDim(data, dimIndex3, member, drop = TRUE)\n \n } else if (member != 'mean' & !any(attributes(data)$dimensions == 'member')){\n stop('There is no member part in the dataset, but you choose one, check the input\n dataset or change your arguments.')\n }\n \n \n \n \n if (is.null(method)) {\n \n warning('You should shoose a method, unless input is a matrix directly to be plotted.')\n #in case the dataset is ready to plot and no need to calculate\n \n } else if (method == 'meanAnnual') { \n #mean value of the annual precipitation over the period of the data \n #time <- proc.time()\n if (length(unique(monthIndex)) < 12) {\n warning ('There are less than 12 months in a year, the results may be inaccurate.')\n }\n data_new <- apply(data, MARGIN = c(2, 1), FUN = getMeanPreci, yearIndex = yearIndex, method = 'annual')\n #newTime <- proc.time() - time\n title_d <- 'Mean Annual Precipitation (mm / year)'\n \n } else if (method == 'winter') {\n #mean value of the seasonal precipitation, in this case, winter \n #time <- proc.time()\n wm <- match(c(12, 1, 2), unique(monthIndex))\n if (length(which(!is.na(wm))) < 3) {\n stop ('Winter has less than 3 months, check data and try to calculate every month\n seperately or choose another season.')\n }\n data_new <- apply(data, MARGIN = c(2, 1), FUN = getMeanPreci, yearIndex = yearIndex, monthIndex = monthIndex, \n method = 
'winter')\n #newTime <- proc.time() - time\n title_d <- 'Mean Winter Precipitation (mm / winter)'\n \n } else if (method == 'spring') {\n wm <- match(c(3, 4, 5), unique(monthIndex))\n if (length(which(!is.na(wm))) < 3) {\n stop ('Spring has less than 3 months, check data and try to calculate every month\n seperately or choose another season.')\n }\n \n data_new <- apply(data, MARGIN = c(2, 1), FUN = getMeanPreci, yearIndex = yearIndex, monthIndex = monthIndex, \n method = 'spring') \n title_d <- 'Mean Spring Precipitation (mm / spring)'\n \n } else if (method == 'summer') {\n wm <- match(c(6, 7, 8), unique(monthIndex))\n if (length(which(!is.na(wm))) < 3) {\n stop ('Summer has less than 3 months, check data and try to calculate every month\n seperately or choose another season.')\n }\n \n data_new <- apply(data, MARGIN = c(2, 1), FUN = getMeanPreci, yearIndex = yearIndex, monthIndex = monthIndex, \n method = 'summer') \n title_d <- 'Mean Summer Precipitation (mm / summer)'\n \n } else if (method == 'autumn') {\n \n wm <- match(c(9, 10, 11), unique(monthIndex))\n if (length(which(!is.na(wm))) < 3) {\n stop ('Autumn has less than 3 months, check data and try to calculate every month\n seperately or choose another season.')\n }\n \n data_new <- apply(data, MARGIN = c(2, 1), FUN = getMeanPreci, yearIndex = yearIndex, monthIndex = monthIndex, \n method = 'autumn') \n title_d <- 'Mean Autumn Precipitation (mm / autumn)'\n \n } else if (method == 'mean') {\n \n #sum value of the dataset, this procedure is to get the mean value\n data_new <- apply(data, MARGIN = c(2, 1), FUN = mean, na.rm = TRUE)\n title_d <- 'Mean Daily Precipitation (mm / day)'\n \n } else if (method == 'max') {\n \n data_new <- apply(data, MARGIN = c(2, 1), FUN = suppressWarnings(max), na.rm = TRUE)\n title_d <- 'Max Daily Precipitation (mm / day)'\n \n } else if (method == 'min') {\n \n data_new <- apply(data, MARGIN = c(2, 1), FUN = suppressWarnings(min), na.rm = TRUE)\n title_d <- 'Min Daily 
Precipitation (mm / day)'\n \n } else if (is.numeric(method)) {\n \n data_new <- apply(data, MARGIN = c(2, 1), FUN = getMeanPreci, yearIndex = yearIndex, monthIndex = monthIndex, \n method = method) \n title_d <- paste(month.abb[method], 'Precipitation (mm / month)', sep = ' ')\n \n } else {\n wrongMethod <- method\n stop(paste('no method called', wrongMethod))\n }\n # This is to give attributes to the matrix and better be melted in ggplot.\n colnames(data_new) <- round(lon, 2)\n rownames(data_new) <- round(lat, 2)\n \n # If ... also has a title argument, this will cause conflicts. so title has to be renamed as title_d\n # This has to be paid a lot of attention when use ... to pass arguments.\n output <- getSpatialMap_mat(matrix = data_new, title_d = title_d, ...)\n return(output)\n}\n\n\n\n\n\n#' Replot raster matrix\n#' \n#' replot the matrix output from \\code{getSpatialMap}, when \\code{output = 'data'} or output is default\n#' value.\n#' \n#' @param matrix A matrix raster, should be the result of \\code{getSpatialMap()}, output should be default\n#' or 'data'\n#' @param title_d A string showing the title of the plot, defaut is NULL.\n#' @param catchment A catchment file geting from \\code{shp2cat()} in the package, if a catchment is available for background.\n#' @param point A dataframe, showing other information, e.g., location of the gauging stations. 
The \n#' the data.frame should be with columes \"name, lon, lat, z, value\".\n#' @param output A string showing the type of the output, if \\code{output = 'ggplot'}, the returned \n#' data can be used in ggplot and \\code{getSpatialMap_comb()}; if \\code{output = 'plot'}, the returned data is the plot containing all \n#' layers' information, and can be plot directly or used in grid.arrange; if not set, the raster matrix data\n#' will be returned.\n#' @param name If \\code{output = 'ggplot'}, name has to be assigned to your output, in order to differentiate\n#' different outputs in the later multiplot using \\code{getSpatialMap_comb}.\n#' @param info A boolean showing whether the information of the map, e.g., max, mean ..., default is FALSE.\n#' @param scale A string showing the plot scale, 'identity' or 'sqrt'.\n#' @param color Most of time you don't have to set this, but if you are not satisfied with the \n#' default color, you can set your own palette here. e.g., \\code{color = c('red', 'blue')}, then\n#' the value from lowest to highest, will have the color from red to blue. More info about color,\n#' please check ?palette().\n#' @param ... \\code{title, x, y} showing the title and x and y axis of the plot. e.g. 
\\code{title = 'aaa'}\n#'default is about precipitation.\n#' @return A matrix representing the raster map is returned, and the map is plotted.\n#' @examples\n#' \n#' \\dontrun{\n#' data(tgridData)# the result of \\code{loadNcdf}\n#' #the output type of has to be default or 'data'.\n#' a1 <- getSpatialMap(tgridData, method = 'mean')\n#' a2 <- getSpatialMap(tgridData, method = 'max')\n#' a3 <- getSpatialMap(tgridData, method = 'winter')\n#' a4 <- getSpatialMap(tgridData, method = 'summer')\n#' #For example, if we want to investigate the difference between mean value and max.\n#' \n#' a5 <- a2 - a1\n#' getSpatialMap_mat(a4)\n#' \n#' #Or to investigate the difference between winter value and summer value.\n#' a6 <- a3 - a4\n#' getSpatialMap_mat(a6)\n#' \n#' }\n#' \n#' \n#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n#' \n#' @export\n#' @import ggplot2 plyr maps maptools rgeos\n#' @importFrom stats median\n#' @importFrom reshape2 melt\n#' @references \n#' \n#' \\itemize{\n#' \\item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for\n#' Statistical Computing, Vienna, Austria. URL https://www.R-project.org/.\n#' \n#' \\item Hadley Wickham (2007). Reshaping Data with the reshape Package. Journal of Statistical Software,\n#' 21(12), 1-20. URL http://www.jstatsoft.org/v21/i12/.\n#' \n#' \\item Hadley Wickham (2011). The Split-Apply-Combine Strategy for Data Analysis. Journal of Statistical\n#' Software, 40(1), 1-29. URL http://www.jstatsoft.org/v40/i01/.\n#' \n#' \\item Original S code by Richard A. Becker and Allan R. Wilks. R version by Ray Brownrigg. Enhancements\n#' by Thomas P Minka (2015). maps: Draw Geographical Maps. R package version\n#' 2.3-11. https://CRAN.R-project.org/package=maps\n#' \n#' \\item Roger Bivand and Nicholas Lewin-Koh (2015). maptools: Tools for Reading and Handling Spatial\n#' Objects. R package version 0.8-36. 
https://CRAN.R-project.org/package=maptools\n#' \n#' \\item Roger Bivand and Colin Rundel (2015). rgeos: Interface to Geometry Engine - Open Source (GEOS). R\n#' package version 0.3-11. https://CRAN.R-project.org/package=rgeos\n#' \n#' }\n#' \n#' \n#' \n#' \n#' \ngetSpatialMap_mat <- function(matrix, title_d = NULL, catchment = NULL, point = NULL, output = 'data', \n name = NULL, info = FALSE, scale = 'identity', color = NULL, ...) {\n #check input\n checkWord <- c('lon', 'lat', 'z', 'value')\n if (is.null(attributes(matrix)$dimnames)) {\n stop('Input matrix is incorrect, check help to know how to get the matrix.')\n } else if (!is.null(catchment) & class(catchment) != \"SpatialPolygonsDataFrame\") {\n stop('Catchment format is incorrect, check help to get more details. ')\n } else if (!is.null(point) & any(is.na(match(checkWord, attributes(point)$names)))) {\n stop('point should be a dataframe with colnames \"lon, lat, z, value\".')\n }\n \n #ggplot\n #for the aes option in ggplot, it's independent from any other command through all ggplot, and aes() function\n #get data from the main dataset, in this case, data_ggplot. for other functions in ggplot, if it wants to use \n #data from the main dataset as parameters, it has to use aes() function. if not, it has to use data available \n #in the environment.\n #in other words, all the parameters in aes(), they have to come from the main dataset. 
Otherwise, just put them\n #outside aes() as normal parameters.\n \n if (info == TRUE) { \n plotMax <- round(max(matrix, na.rm = TRUE), 2)\n plotMin <- round(min(matrix, na.rm = TRUE), 2)\n plotMean <- round(mean(matrix, na.rm = TRUE), 2)\n plotMedian <- round(median(matrix, na.rm = TRUE), 2)\n word <- paste('\\n\\n', paste('Max', '=', plotMax), ',', paste('Min', '=', plotMin), ',',\n paste('Mean', '=', plotMean), ',', paste('Median', '=', plotMedian))\n } else {\n word <- NULL\n }\n \n x_word <- paste('Longitude', word)\n world_map <- map_data('world')\n \n # For some cases, matrix has to be reshaped, because it's too fat or too slim, to make\n # it shown on the map, the ratio is x : y is 4 : 3.\n matrix <- reshapeMatrix(matrix)\n \n \n # cannot remove NA, or the matrix shape will be changed.\n data_ggplot <- melt(matrix, na.rm = FALSE) \n \n colnames(data_ggplot) <- c('lat', 'lon', 'value')\n theme_set(theme_bw())\n \n if (is.null(color)) color <- c('yellow', 'orange', 'red')\n # if (is.null(color)) color <- rev(rainbow(n = 20, end = 0.7))\n \n mainLayer <- with(data_ggplot, {\n \n ggplot(data = data_ggplot) +\n geom_tile(aes(x = lon, y = lat, fill = value)) +\n #scale_fill_discrete()+\n scale_fill_gradientn(colours = color, na.value = 'transparent') +#usually scale = 'sqrt'\n #guide = guide_colorbar, colorbar and legend are not the same.\n guides(fill = guide_colourbar(title='Rainfall (mm)', barheight = rel(9), trans = scale)) +#usually scale = 'sqrt'\n geom_map(data = world_map, map = world_map, aes(map_id = region), fill = 'transparent', \n color='black') +\n # guides(fill = guide_colorbar(title='Rainfall (mm)', barheight = 15))+\n xlab(x_word) +\n ylab('Latitude') +\n ggtitle(title_d) +\n labs(empty = NULL, ...) 
+#in order to pass \"...\", arguments shouldn't be empty.\n theme(plot.title = element_text(size = rel(1.8), face = 'bold'),\n axis.title.x = element_text(size = rel(1.7)),\n axis.title.y = element_text(size = rel(1.7)),\n axis.text.x = element_text(size = rel(1.9)),\n axis.text.y = element_text(size = rel(1.9)),\n legend.text = element_text(size = rel(1.3)),\n legend.title = element_text(size = rel(1.3)))\n# coord_fixed(ratio = 1, xlim = xlim, ylim = ylim)\n \n# geom_rect(xmin=min(lon)+0.72*(max(lon)-min(lon)),\n# xmax=min(lon)+0.99*(max(lon)-min(lon)),\n# ymin=min(lat)+0.02*(max(lat)-min(lat)),\n# ymax=min(lat)+0.28*(max(lat)-min(lat)),\n# fill='white',colour='black')+\n# annotate('text', x = min(lon), y = min(lat), label=word, hjust = 0, vjust = -1)\n \n })\n \n printLayer <- mainLayer\n \n #catchment conversion\n if (is.null(catchment) == FALSE) {\n a <- catchment\n a@data$id <- rownames(a@data)\n b <- fortify(a, region = 'id')\n c <- join(b, a@data, by = 'id')\n catchmentLayer <- with(c, {\n geom_polygon(data = c, aes(long, lat, group = group), color = 'black', \n fill = 'transparent')\n })\n \n \n printLayer <- printLayer + catchmentLayer\n }\n #plot point\n if (is.null(point) == FALSE) {\n pointLayer <- with(point, {\n geom_point(data = point, aes(x = lon, y = lat, size = value, colour = z),\n guide = guide_legend(barheight = rel(3)))\n \n \n })\n \n printLayer <- printLayer + pointLayer\n }\n \n print(printLayer)\n \n if (output == 'ggplot') {\n if (is.null(name)) stop('\"name\" argument not found, \n If you choose \"ggplot\" as output, please assign a name.')\n data_ggplot$Name <- rep(name, dim(data_ggplot)[1])\n return (data_ggplot)\n } else if (output == 'plot') {\n return(printLayer)\n } else {\n return(matrix)\n }\n}\n\n\n#' Combine maps together\n#' @param ... 
different maps generated by \\code{getSpatialMap(, output = 'ggplot')}, see details.\n#' @param nrow A number showing the number of rows.\n#' @param list If input is a list containing different ggplot data, use \\code{list = inputlist}.\n#' @param x A string of x axis name.\n#' @param y A string of y axis name.\n#' @param title A string of the title.\n#' @param output A boolean, if chosen TRUE, the output will be given.\n#' @return A combined map.\n#' @examples\n#' \n#' \n#' \\dontrun{\n#' data(tgridData)# the result of \\code{\\link{loadNcdf}}\n#' #The output should be 'ggplot'\n#' a1 <- getSpatialMap(tgridData, method = 'summer', output = 'ggplot', name = 'a1')\n#' a2 <- getSpatialMap(tgridData, method = 'winter', output = 'ggplot', name = 'a2')\n#' a3 <- getSpatialMap(tgridData, method = 'mean', output = 'ggplot', name = 'a3')\n#' a4 <- getSpatialMap(tgridData, method = 'max', output = 'ggplot', name = 'a4')\n#' getSpatialMap_comb(a1, a2)\n#' \n#' # or you can put them into a list.\n#' getSpatialMap_comb(list = list(a1, a2), nrow = 2)\n#' }\n#' \n#' \n#' \n#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n#' \n#' @details\n#' For \\code{getSpatialMap_comb}, the maps to be compared should be with same size and resolution, \n#' in other words, they should be fully overlapped by each other.\n#' \n#' If they have different resolutions, use \\code{interpGridData{ecomsUDG.Raccess}} to interpolate.\n#' \n#' @export\n#' @import ggplot2 maps\n#' @importFrom data.table rbindlist\n#' @references \n#' \n#' \\itemize{\n#' \\item H. Wickham. ggplot2: elegant graphics for data analysis. 
Springer New York, 2009.\n#' }\ngetSpatialMap_comb <- function(..., list = NULL, nrow = 1, x = '', y = '', title = '', \n output = FALSE) {\n \n \n if (!is.null(list)) {\n data_ggplot <- rbindlist(list)\n } else {\n maps <- list(...)\n checkBind(maps, 'rbind')\n data_ggplot <- rbindlist(maps)\n }\n \n if (!class(data_ggplot) == 'data.frame') {\n warning('Your input is probably a list, but you forget to add \"list = \" before it.\n Try again, or check help for more information.')\n } else if (is.null(data_ggplot$Name)) {\n stop('No \"Name\" column in the input data, check the arguments in getSpatialMap(), if \n output = \"ggplot\" is assigned, more info please check ?getSpatialMap().')\n }\n \n data_ggplot$Name <- factor(data_ggplot$Name, levels = unique(data_ggplot$Name), ordered = TRUE)\n \n# lim <- getLim(data_ggplot$lon, data_ggplot$lat)\n# xlim <- lim[[1]] \n# ylim <- lim[[2]]\n \n world_map <- map_data('world')\n theme_set(theme_bw())\n mainLayer <- with(data_ggplot, {\n ggplot(data = data_ggplot) + \n geom_tile(aes(x = lon, y = lat, fill = value)) +\n #scale_fill_gradient(high = 'red', low = 'yellow')+\n scale_fill_gradientn(colours = c('yellow', 'orange', 'red'), na.value = 'transparent') +#usually scale = 'sqrt'\n geom_map(data = world_map, map = world_map, aes(map_id = region), fill = 'transparent', color = 'black') +\n# guides(fill = guide_colourbar(title='Rainfall (mm)', barheight = rel(9), trans = scale)) +#\n facet_wrap(~ Name, nrow = nrow) +\n theme(plot.title = element_text(size = rel(1.8), face = 'bold'),\n axis.title.x = element_text(size = rel(1.7)),\n axis.title.y = element_text(size = rel(1.7)),\n axis.text.x = element_text(size = rel(1.9)),\n axis.text.y = element_text(size = rel(1.9)),\n legend.text = element_text(size = rel(1.3)),\n legend.title = element_text(size = rel(1.3))) +\n # no solultion for some very fat or very slim, in facet ggplot2, so, it's not buitiful.\n #coord_equal() +\n labs(x = x, y = y, title = title)\n })\n \n \n 
suppressWarnings(print(mainLayer))\n \n if (output == TRUE) return(data_ggplot)\n}\n\n\n\nreshapeMatrix <- function(matrix) {\n # This is for the map plot to keep the ratio x : y == 4:3\n # mainly used in map plot in ggplot2.\n \n \n # So the input matrix should be reshaped, add in some NA values, \n # in order to be shown appropriately in ggplot.\n \n lon <- as.numeric(colnames(matrix))\n lat <- as.numeric(rownames(matrix))\n \n dx <- mean(diff(lon))\n dy <- mean(diff(lat))\n \n lx <- max(lon) - min(lon)\n ly <- max(lat) - min(lat)\n \n \n if (0.75 * lx < ly) {\n # In this case, x needs to be made longer\n \n xhalf <- 0.67 * ly\n xadd <- xhalf - lx / 2\n # calculate how many columns needs to be added.\n nxadd <- abs(round(xadd / dx))\n \n madd1 <- matrix(data = NA, nrow = length(lat), ncol = nxadd)\n madd2 <- madd1\n colnames(madd1) <- seq(to = min(lon) - dx, length = nxadd, by = dx)\n colnames(madd2) <- seq(from = max(lon) + dx, length = nxadd, by = dx)\n \n matrix_new <- cbind(madd1, matrix, madd2) \n \n \n } else if (0.75 * lx > ly) {\n \n yhalf <- 0.38 * lx\n yadd <- yhalf - ly / 2\n nyadd <- abs(round(yadd / dy))\n \n madd1 <- matrix(data = NA, nrow = nyadd, ncol = length(lon))\n madd2 <- madd1 \n \n rownames(madd1) <- seq(to = max(lat) + dy, length = nyadd, by = -dy)\n rownames(madd2) <- seq(from = min(lat) - dx, length = nyadd, by = -dy)\n \n matrix_new <- rbind(madd1, matrix, madd2)\n \n } else {\n matrix_new <- matrix\n }\n \n return(matrix_new)\n}\n", + "created" : 1487522683331.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "431235859", + "id" : "F28DEBD3", + "lastKnownWriteTime" : 1488017859, + "last_content_update" : 1488017859979, + "path" : "~/GitHub/hyfo/R/getSpatialMap.R", + "project_path" : "R/getSpatialMap.R", + "properties" : { + }, + "relative_order" : 18, + "source_on_save" : false, + "source_window" : "", + "type" : "r_source" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/F72259DF 
b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/F72259DF new file mode 100644 index 0000000..08abd01 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/F72259DF @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "% Generated by roxygen2: do not edit by hand\n% Please edit documentation in R/analyzeTS.R\n\\name{plotTS_comb}\n\\alias{plotTS_comb}\n\\title{Combine time seires plot together}\n\\usage{\nplotTS_comb(..., nrow = 1, type = \"line\", list = NULL, x = \"Date\",\n y = \"\", title = \"\", output = FALSE)\n}\n\\arguments{\n\\item{...}{different time series plots generated by \\code{plotTS(, output = 'ggplot')}, refer to details.}\n\n\\item{nrow}{A number showing the number of rows.}\n\n\\item{type}{A string showing 'line' or 'bar'.}\n\n\\item{list}{If input is a list containing different ggplot data, use l\\code{list = inputlist}.}\n\n\\item{x}{A string of x axis name.}\n\n\\item{y}{A string of y axis name.}\n\n\\item{title}{A string of the title.}\n\n\\item{output}{A boolean, if chosen TRUE, the output will be given.\nNOTE: yOU HAVE TO PUT A \\code{list = }, before your list.}\n}\n\\value{\nA combined time series plot.\n}\n\\description{\nCombine time seires plot together\n}\n\\details{\n..., representing different ouput file generated by \\code{plotTS(, output = 'ggplot'), name = yourname}, \ndifferent names must be assigned when generating different output.\n\ne.g.\na1, a2, a3 are different files generated by \\code{plotTS(, output = 'ggplot'), name = yourname}, you can\nset \\code{plotTS(a1,a2,a3)} or \\code{plotTS(list = list(a1,a2,a3))}\n}\n\\examples{\na1 <- plotTS(testdl[[1]], output = 'ggplot', name = 1)\na2 <- plotTS(testdl[[2]], output = 'ggplot', name = 2)\n\nplotTS_comb(a1, a2)\nplotTS_comb(list = list(a1, a2), y = 'y axis', nrow = 2)\n\n# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n\n}\n\\references{\n\\itemize{\n\\item H. Wickham. ggplot2: elegant graphics for data analysis. 
Springer New York, 2009.\n}\n}\n\n", + "created" : 1488014900544.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "1328983654", + "id" : "F72259DF", + "lastKnownWriteTime" : 1488018205, + "last_content_update" : 1488018205, + "path" : "~/GitHub/hyfo/man/plotTS_comb.Rd", + "project_path" : "man/plotTS_comb.Rd", + "properties" : { + }, + "relative_order" : 45, + "source_on_save" : false, + "source_window" : "", + "type" : "r_doc" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/F8BC78A3 b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/F8BC78A3 new file mode 100644 index 0000000..1e819d2 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/F8BC78A3 @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "#' plot time series, with marks on missing value.\n#' \n#' @param ... input time series.\n#' @param type A string representing the type of the time series, e.g. 'line' or 'bar'.\n#' @param output A string showing which type of output you want. Default is \"data\", if \"ggplot\", the \n#' data that can be directly plotted by ggplot2 will be returned, which is easier for you to make series\n#' plots afterwards. \n#' @param name If \\code{output = 'ggplot'}, name has to be assigned to your output, in order to differentiate\n#' different outputs in the later multiplot using \\code{plotTS_comb}.\n#' @param plot representing the plot type, there are two types, \"norm\" and \"cum\", \"norm\" gives an normal\n#' plot, and \"cum\" gives a cumulative plot. 
Default is \"norm\".\n#' @param showNA A boolean representing whether the NA values should be marked, default is TRUE.\n#' @param x label for x axis.\n#' @param y label for y axis.\n#' @param title plot title.\n#' @param list If your input is a list of time series, then use \\code{list = your time sereis list}\n#' @return A plot of the input time series.\n#' @details \n#' If your input has more than one time series, the program will only plot the common period of \n#' different time series.\n#' @examples\n#' plotTS(testdl[[1]])\n#' plotTS(testdl[[1]], x = 'xxx', y = 'yyy', title = 'aaa')\n#' \n#' # If input is a datalist\n#' plotTS(list = testdl)\n#' \n#' # Or if you want to input time series one by one\n#' # If plot = 'cum' then cumulative curve will be plotted.\n#' plotTS(testdl[[1]], testdl[[2]], plot = 'cum')\n#' \n#' # You can also directly plot multicolumn dataframe\n#' dataframe <- list2Dataframe(extractPeriod(testdl, commonPeriod = TRUE))\n#' plotTS(dataframe, plot = 'cum')\n#' \n#' # Sometimes you may want to process the dataframe and compare with the original one\n#' dataframe1 <- dataframe\n#' dataframe1[, 2:4] <- dataframe1[, 2:4] + 3\n#' plotTS(dataframe, dataframe1, plot = 'cum')\n#' # But note, if your input is a multi column dataframe, it's better to plot one using plotTS,\n#' # and compare them using plotTS_comb. If all data are in one plot, there might be too messy.\n#' \n#' \n#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n#' \n#' @references \n#' \\itemize{\n#' \\item H. Wickham. ggplot2: elegant graphics for data analysis. 
Springer New York, 2009.\n#' }\n#' \n#' @import ggplot2\n#' @importFrom reshape2 melt\n#' @export\nplotTS <- function(..., type = 'line', output = 'data', plot = 'norm', name = NULL, showNA = TRUE, \n x = NULL, y = NULL, title = NULL, list = NULL) {\n ## arrange input TS or TS list.\n if (is.null(list)) {\n list <- list(...)\n if (!class(list[[1]]) == 'data.frame') {\n warning('Your input is probably a list, but you forget to add \"list = \" before it.\n Try again, or check help for more information.')\n }\n# Following part is for plot different time series with different date, but too complicated\n# using ggplot. and normal use doesn't need such process. So save it as backup.\n# listNames <- names(list)\n# # in order to be used later to differentiate lists, there should be a name for each element.\n# # Then assign the name column to each list element.\n# if (is.null(listNames)) listNames <- 1:length(list)\n# \n# giveName <- function(x, y) {\n# colnames(x) <- NULL\n# x$TSname <- rep(listNames[y], nrow(x))\n# return(x)\n# }\n# list1 <- mapply(FUN = giveName, x = list, y = 1:length(list), SIMPLIFY = FALSE)\n# \n# checkBind(list1, 'rbind')\n# \n# TS <- do.call('rbind', list1)\n }\n \n list_common <- extractPeriod(list, commonPeriod = TRUE)\n TS <- list2Dataframe(list_common)\n \n if (!is.null(names(list)) & (ncol(TS) - 1) == length(list)) colnames(TS)[2:(length(list) + 1)] <- names(list)\n \n # Check input, only check the first column and first row.\n if (!grepl('-|/', TS[1, 1])) {\n stop('First column is not date or Wrong Date formate, check the format in ?as.Date{base} \n and use as.Date to convert.')\n }\n \n TS[, 1] <- as.Date(TS[, 1])\n colnames(TS)[1] <- 'Date'\n \n # first column's name may not be Date, so change its name to Date\n \n data_plot <- melt(TS, id.var = 'Date')\n NAIndex <- which(is.na(data_plot$value))\n \n # assign 0 to NA values\n if (plot == 'norm') {\n data_plot$value[NAIndex] <- 0\n lineSize <- 0.7\n } else if (plot == 'cum') {\n TS[is.na(TS)] 
<- 0\n cum <- cbind(data.frame(Date = TS[, 1]), cumsum(TS[2:ncol(TS)]))\n \n data_plot <- melt(cum, id.var = 'Date')\n lineSize <- 1\n }\n \n \n # Assigning x, y and title\n if (is.null(x)) x <- colnames(TS)[1]\n # y aixs cannot decide if it's a multi column dataframe\n #if (is.null(y)) y <- names[2]\n \n theme_set(theme_bw())\n mainLayer <- with(data_plot, {\n ggplot(data = data_plot) +\n # It's always better to use colname to refer to\n aes(x = Date, y = value, color = variable) +\n theme(plot.title = element_text(size = rel(1.8), face = 'bold'),\n axis.text.x = element_text(size = rel(1.8)),\n axis.text.y = element_text(size = rel(1.8)),\n axis.title.x = element_text(size = rel(1.8)),\n axis.title.y = element_text(size = rel(1.8))) +\n labs(x = x, y = y, title = title)\n })\n \n \n# color <- 'dodgerblue4'\n if (type == 'bar') {\n secondLayer <- with(data_plot, {\n geom_bar(stat = 'identity')\n })\n } else if (type == 'line') {\n secondLayer <- with(data_plot, {\n geom_line(size = lineSize)\n })\n } else {\n stop(\"No such plot type.\")\n }\n \n if (showNA == TRUE) {\n missingVLayer <- with(TS, {\n geom_point(data = data_plot[NAIndex, ], group = 1, size = 3, shape = 4, color = 'black')\n })\n \n mainLayer <- mainLayer + missingVLayer\n }\n \n \n plotLayer <- mainLayer + secondLayer\n \n print(plotLayer) \n \n if (output == 'ggplot') {\n if (is.null(name)) stop('\"name\" argument not found, \n If you choose \"ggplot\" as output, please assign a name.')\n \n data_plot$name <- rep(name, nrow(data_plot)) \n data_plot$nav <- rep(0, nrow(data_plot))\n data_plot$nav[NAIndex] <- 1\n return(data_plot)\n }\n}\n\n\n\n\n#' Combine time seires plot together\n#' @param ... 
different time series plots generated by \\code{plotTS(, output = 'ggplot')}, refer to details.\n#' @details\n#' ..., representing different ouput file generated by \\code{plotTS(, output = 'ggplot'), name = yourname}, \n#' different names must be assigned when generating different output.\n#' \n#' e.g.\n#' a1, a2, a3 are different files generated by \\code{plotTS(, output = 'ggplot'), name = yourname}, you can\n#' set \\code{plotTS(a1,a2,a3)} or \\code{plotTS(list = list(a1,a2,a3))}\n#' \n#' @param nrow A number showing the number of rows.\n#' @param type A string showing 'line' or 'bar'.\n#' @param list If input is a list containing different ggplot data, use l\\code{list = inputlist}.\n#' @param x A string of x axis name.\n#' @param y A string of y axis name.\n#' @param title A string of the title.\n#' @param output A boolean, if chosen TRUE, the output will be given.\n#' NOTE: yOU HAVE TO PUT A \\code{list = }, before your list.\n#' @return A combined time series plot.\n#' @examples\n#' a1 <- plotTS(testdl[[1]], output = 'ggplot', name = 1)\n#' a2 <- plotTS(testdl[[2]], output = 'ggplot', name = 2)\n#' \n#' plotTS_comb(a1, a2)\n#' plotTS_comb(list = list(a1, a2), y = 'y axis', nrow = 2)\n#' \n#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n#' \n#' @references \n#' \\itemize{\n#' \\item H. Wickham. ggplot2: elegant graphics for data analysis. 
Springer New York, 2009.\n#' }\n#' @export\n#' @import ggplot2\n#' @importFrom data.table rbindlist\nplotTS_comb <- function(..., nrow = 1, type = 'line', list = NULL, x = 'Date', y = '', title = '', \n output = FALSE){\n # In ploting the time series, since the data comes from outside of hyfo, \n # It's more complicated, since they don't always have the same\n # column name, if not, there is not possible to do rbind.\n # So we need to first save the name, and rbind, and put back the name.\n \n if (!is.null(list)) {\n checkBind(list, 'rbind')\n #data_ggplot <- do.call('rbind', list)\n data_ggplot <- rbindlist(list)\n } else {\n \n bars <- list(...)\n checkBind(bars, 'rbind')\n #data_ggplot <- do.call('rbind', bars)\n data_ggplot <- rbindlist(bars)\n }\n \n if (!class(data_ggplot)[1] == 'data.table') {\n warning('Your input is probably a list, but you forget to add \"list = \" before it.\n Try again, or check help for more information.')\n } else if (is.null(data_ggplot$name)) {\n stop('No \"name\" column in the input data, check the arguments in getPreciBar(), if \n output = \"ggplot\" is assigned, more info please check ?getPreciBar.')\n }\n\n \n theme_set(theme_bw())\n mainLayer <- with(data_ggplot, {\n ggplot(data = data_ggplot) +\n # It's always better to use colname to refer to\n aes(x = Date, y = value, color = variable) +\n theme(plot.title = element_text(size = rel(1.8), face = 'bold'),\n axis.text.x = element_text(angle = 90, hjust = 1, size = rel(1.8)),\n axis.text.y = element_text(size = rel(1.8)),\n axis.title.x = element_text(size = rel(1.8)),\n axis.title.y = element_text(size = rel(1.8))) +\n geom_point(data = data_ggplot[data_ggplot$nav == 1, ], size = 2, shape = 4, color = 'red') +\n facet_wrap( ~ name, nrow = nrow) +\n labs(x = x, y = y, title = title)\n \n })\n \n \n if (type == 'bar') {\n secondLayer <- with(data_ggplot, {\n geom_bar(stat = 'identity', size = 1)\n })\n } else if (type == 'line') {\n secondLayer <- with(data_ggplot, {\n 
geom_line(size = 1)\n })\n } else {\n stop(\"No such plot type.\")\n }\n \n print(mainLayer + secondLayer)\n \n if (output == TRUE) return(data_ggplot)\n}\n\n\n\n\n#' get L moment analysis of the input distribution\n#' \n#' @param dis A distribution, for hydrology usually a time series with only data column without time.\n#' @return The mean, L-variation, L-skewness and L-kurtosis of the input distribution\n#' @examples\n#' dis <- seq(1, 100)\n#' getLMom(dis)\n#' \n#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n#' \n#' @export\n#' @references \n#' \n#' \\itemize{\n#' \\item J. R. M. Hosking (2015). L-moments. R package, version 2.5. URL:\n#' https://CRAN.R-project.org/package=lmom.\n#' }\n#' \n#' \n#' @importFrom lmom samlmu\n#' \ngetLMom <- function(dis){\n \n LMom <- samlmu(dis, nmom = 4, ratios = TRUE)\n \n mean <- LMom[1]\n LCV <- LMom[2]/LMom[1]\n Lskew <- LMom[3]\n Lkur <- LMom[4]\n \n output <- data.frame(mean = mean, Lcv = LCV, Lskew = Lskew, Lkur = Lkur)\n return(output)\n}\n\n#' get moment analysis of the input distribution\n#' \n#' @param dis A distribution, for hydrology usually a time series with only data column without time.\n#' @return The mean, variation, skewness and kurtosis of the input distribution\n#' @examples\n#' dis <- seq(1, 100)\n#' getMoment(dis)\n#' \n#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n#' \n#' @export\n#' @references \n#' \n#' \\itemize{\n#' \\item Lukasz Komsta and Frederick Novomestky (2015). moments: Moments, cumulants, skewness, kurtosis and\n#' related tests. R package version 0.14. https://CRAN.R-project.org/package=moments\n#' \n#' \\item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for\n#' Statistical Computing, Vienna, Austria. 
URL https://www.R-project.org/.\n#' }\n#' \n#' @importFrom moments skewness kurtosis\n#' @importFrom stats var\ngetMoment <- function(dis) {\n mean <- mean(dis, na.rm = TRUE)\n variance <- var(dis, na.rm = TRUE)\n skewness <- skewness(dis, na.rm = TRUE)\n kurtosis <- kurtosis(dis, na.rm = TRUE)\n \n output <- data.frame(mean=mean, Variance = variance, Skewness = skewness, Kurtosis = kurtosis)\n \n return(output)\n}\n", + "created" : 1487441178501.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "2041803482", + "id" : "F8BC78A3", + "lastKnownWriteTime" : 1488015065, + "last_content_update" : 1488015065997, + "path" : "~/GitHub/hyfo/R/analyzeTS.R", + "project_path" : "R/analyzeTS.R", + "properties" : { + }, + "relative_order" : 11, + "source_on_save" : false, + "source_window" : "", + "type" : "r_source" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/F8CC347F b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/F8CC347F new file mode 100644 index 0000000..7f2be6b --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/F8CC347F @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "% Generated by roxygen2: do not edit by hand\n% Please edit documentation in R/extractPeriod(generic).R\n\\docType{methods}\n\\name{extractPeriod}\n\\alias{extractPeriod}\n\\alias{extractPeriod,data.frame-method}\n\\alias{extractPeriod,list-method}\n\\title{Extract period from list or dataframe.}\n\\usage{\nextractPeriod(data, startDate = NULL, endDate = NULL,\n commonPeriod = FALSE, year = NULL, month = NULL)\n\n\\S4method{extractPeriod}{data.frame}(data, startDate = NULL, endDate = NULL,\n commonPeriod = FALSE, year = NULL, month = NULL)\n\n\\S4method{extractPeriod}{list}(data, startDate = NULL, endDate = NULL,\n commonPeriod = FALSE, year = NULL, month = NULL)\n}\n\\arguments{\n\\item{data}{A list of different dataframes of time series, or a dataframe with first column Date, the rest columns value.}\n\n\\item{startDate}{A Date showing the 
start of the extract period, default as NULL, check details.}\n\n\\item{endDate}{A Date showing the end of the extract period, default as NULL, check details.}\n\n\\item{commonPeriod}{A boolean showing whether the common period is extracted. If chosen, startDate and endDate\nshould be NULL.}\n\n\\item{year}{extract certain year in the entire time series. if you want to extract year 2000, set \\code{year = 2000}}\n\n\\item{month}{extract certain months in a year. e.g. if you want to extract Jan, Feb of each year, \nset \\code{month = c(1, 2)}.}\n}\n\\value{\nA list or a dataframe with all the time series inside containing the same period.\n}\n\\description{\nExtract common period or certain period from a list of different dataframes of time series, or from a \ndataframe. \nNOTE: all the dates in the datalist should follow the format in ?as.Date{base}.\n}\n\\details{\n\\strong{startDate and endDate}\n\nIf startDate and endDate are assigned, then certain period between startDate and endDate will be returned, \nfor both datalist input and dataframe input.\n\nIf startDate and endDate are NOT assigned, then,\n\n if input is a datalist, the startDate and endDate of the common period of different datalists will be assigned\n to the startDate and endDate.\n\n if input is a dataframe, the startDate and endDate of the input dataframe will be assigned to the startDate\n and endDate . Since different value columns share a common Date column in a dataframe input. \n\n\\strong{year and month}\n\nFor year crossing month input, hyfo will take from the year before. E.g. if \\code{month = c(10, 11, 12, 1)},\nand \\code{year = 1999}, hyfo will take month 10, 11 and 12 from year 1998, and month 1 from 1999.You DO NOT \nhave to set \\code{year = 1998 : 1999}.\n\nWell, if you set \\code{year = 1998 : 1999}, hyfo will take month 10, 11 and 12 from year 1997, and month 1 from 1998,\nthen, take month 10, 11 and 12 from year 1998, month 1 from 1999. 
So you only have to care about the latter year.\n\nIt is a generic function, if in your case you need to debug, please see \\code{?debug()} \nfor how to debug S4 method.\n}\n\\examples{\n# Generate timeseries datalist. Each data frame consists of a Date and a value.\n\nAAA <- data.frame(\n# date column\nDate = seq(as.Date('1990-10-28'),as.Date('1997-4-1'),1),\n # value column\nAAA = sample(1:100,length(seq(as.Date('1990-10-28'),as.Date('1997-4-1'),1)), repl = TRUE))\n\nBBB <- data.frame(\nDate = seq(as.Date('1993-3-28'),as.Date('1999-1-1'),1), \nBBB = sample(1:100,length(seq(as.Date('1993-3-28'),as.Date('1999-1-1'),1)), repl = TRUE))\n \nCCC <- data.frame(\nDate = seq(as.Date('1988-2-2'),as.Date('1996-1-1'),1), \nCCC = sample(1:100,length(seq(as.Date('1988-2-2'),as.Date('1996-1-1'),1)), repl = TRUE)) \n\nlist <- list(AAA, BBB, CCC)# dput() and dget() can be used to save and load list file.\n\nlist_com <- extractPeriod(list, commonPeriod = TRUE)\n\n# list_com is the extracted datalist.\nstr(list_com)\n\n# If startDate and endDate is provided, the record between them will be extracted.\n# make sure startDate is later than any startDate in each dataframe and endDate is \n# earlier than any endDate in each dataframe.\n\ndata(testdl)\ndatalist_com1 <- extractPeriod(testdl, startDate = '1994-1-1', endDate = '1995-10-1')\n\n\ndataframe <- list2Dataframe(datalist_com1)\n# now we have a dataframe to extract certain months and years.\ndataframe_new <- extractPeriod(dataframe, month = c(1,2,3))\ndataframe_new <- extractPeriod(dataframe, month = c(12,1,2), year = 1995)\n\n\n# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n\n}\n\\references{\n\\itemize{\n\\item Achim Zeileis and Gabor Grothendieck (2005). zoo: S3 Infrastructure for Regular and Irregular Time\nSeries. Journal of Statistical Software, 14(6), 1-27. 
URL https://www.jstatsoft.org/v14/i06/\n}\n}\n\n", + "created" : 1487956160878.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "992803177", + "id" : "F8CC347F", + "lastKnownWriteTime" : 1488018205, + "last_content_update" : 1488018205, + "path" : "~/GitHub/hyfo/man/extractPeriod.Rd", + "project_path" : "man/extractPeriod.Rd", + "properties" : { + }, + "relative_order" : 27, + "source_on_save" : false, + "source_window" : "", + "type" : "r_doc" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/FFE783F b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/FFE783F new file mode 100644 index 0000000..1e6da70 --- /dev/null +++ b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/FFE783F @@ -0,0 +1,20 @@ +{ + "collab_server" : "", + "contents" : "% Generated by roxygen2: do not edit by hand\n% Please edit documentation in R/biasCorrect(generic).R\n\\docType{methods}\n\\name{biasCorrect}\n\\alias{biasCorrect}\n\\alias{biasCorrect,data.frame,data.frame,data.frame-method}\n\\alias{biasCorrect,list,list,list-method}\n\\title{Biascorrect the input timeseries or hyfo dataset}\n\\usage{\nbiasCorrect(frc, hindcast, obs, method = \"scaling\", scaleType = \"multi\",\n preci = FALSE, prThreshold = 0, extrapolate = \"no\")\n\n\\S4method{biasCorrect}{data.frame,data.frame,data.frame}(frc, hindcast, obs,\n method = \"scaling\", scaleType = \"multi\", preci = FALSE,\n prThreshold = 0, extrapolate = \"no\")\n\n\\S4method{biasCorrect}{list,list,list}(frc, hindcast, obs, method = \"scaling\",\n scaleType = \"multi\", preci = FALSE, prThreshold = 0,\n extrapolate = \"no\")\n}\n\\arguments{\n\\item{frc}{a hyfo grid data output or a dataframe (time series) consists of Date column and one or more value columns, \nrepresenting the forecast to be calibrated.}\n\n\\item{hindcast}{a hyfo grid data output or a dataframe(time series) consists of Date column and one or more value columns, \nrepresenting the hindcast data. 
This data will be used in the calibration of the forecast, so it's better to have the same date period as\nobservation data. Check details for more information.}\n\n\\item{obs}{a hyfo grid data output or a dataframe (time series) consists of Date column and one or more value columns, \nrepresenting the observation data.}\n\n\\item{method}{bias correct method, including 'delta', 'scaling'..., default is 'scaling'}\n\n\\item{scaleType}{only when the method \"scaling\" is chosen, scaleType will be available. Two different types\nof scaling method, 'add' and 'multi', which means additive and multiplicative scaling method. More info check \ndetails. Default scaleType is 'multi'.}\n\n\\item{preci}{If the precipitation is biascorrected, then you have to assign \\code{preci = TRUE}. Since for\nprecipitation, some biascorrect methods may not apply to, or some methods are specially for precipitation. \nDefault is FALSE, refer to details.}\n\n\\item{prThreshold}{The minimum value that is considered as a non-zero precipitation. Default to 1 (assuming mm).}\n\n\\item{extrapolate}{When use 'eqm' method, and 'no' is set, modified frc is bounded by the range of obs.\nIf 'constant' is set, modified frc is not bounded by the range of obs. Default is 'no'.}\n}\n\\description{\nBiascorrect the input time series or dataset, the input time series or dataset should consist of observation, hindcast, and forecast.\nobservation and hindcast should belong to the same period, in order to calibrate. Then the modified forecast\nwill be returned. If the input is a time series, first column should be date column and rest columns should be \nthe value column. 
If the input is a hyfo dataset, the dataset should be the result of \\code{loadNcdf}, or a list\nfile with the same format.\n}\n\\details{\nSince climate forecast is based on global condition, when downscaling to different regions, it may include\nsome bias, biascorrection is used then to fix the bias.\n\n\\strong{Hindcast}\n\nIn order to bias correct, we need to pick up some data from the forecast to train with\nthe observation, which is called hindcast in this function. Using hindcast and observation, \nthe program can analyze the bias and correct the bias in the forecast. \n\nHindcast should have \\strong{EVERY} attributes that forecast has.\n\nHindcast is also called re-forecast, is the forecast of the past. E.g. you have a forecast from year 2000-2010, assuming now you are in 2005. So from 2000-2005, this period\nis the hindcast period, and 2005-2010, this period is the forecast period.\n\nHindcast can be the same as forecast, i.e., you can use forecast itself as hindcast to train the bias correction.\n\n\n\\strong{How it works}\n\nForecast product has to be calibrated, usually the system is doing forecast in real time. So, e.g., if the \nforecast starts from year 2000, assuming you are in year 2003, then you will have 3 years' hindcast \ndata (year 2000-2003), which can be used to calibrate. And your forecast period is (2003-2004)\n\nE.g. you have observation from 2001-2002, this is your input obs. Then you can take the same \nperiod (2001-2002) from the forecast, which is the hindcast period. For forecast, you can take any period.\nThe program will evaluate the obs and hindcast, to get the modification of the forecast, and then add the \nmodification to the forecast data.\n\nThe more categorized input, the more accurate result you will get. E.g., if you want to \nbias correct a forecast for winter season. So you'd better to extract all the winter period\nin the hindcast and observation to train. 
\\code{extractPeriod} can be used for this purpose.\n\n\\strong{method}\n\nDifferent methods used in the bias correction. Among which, delta, scaling can be applied\nto different kinds of parameters, with no need to set \\code{preci}; eqm has two conditions for rainfall data and other data,\nit needs user to input \\code{preci = TRUE/FALSE} to point to different conditions; gqm is\ndesigned for rainfall data, so \\code{preci = TRUE} needs to be set.\n\n\\strong{delta}\n\nThis method consists on adding to the observations the mean change signal (delta method). \nThis method is applicable to any kind of variable but it is preferable to avoid it for bounded variables\n (e.g. precipitation, wind speed, etc.) because values out of the variable range could be obtained \n (e.g. negative wind speeds...)\n \n\\strong{scaling}\n \nThis method consists on scaling the simulation with the difference (additive) or quotient (multiplicative) \nbetween the observed and simulated means in the train period. The \\code{additive} or \\code{multiplicative}\ncorrection is defined by parameter \\code{scaling.type} (default is \\code{additive}).\nThe additive version is preferably applicable to unbounded variables (e.g. temperature) \nand the multiplicative to variables with a lower bound (e.g. precipitation, because it also preserves the frequency). \n \n \\strong{eqm}\n \nEmpirical Quantile Mapping. This is a very extended bias correction method which consists on calibrating the simulated Cumulative Distribution Function (CDF) \nby adding to the observed quantiles both the mean delta change and the individual delta changes in the corresponding quantiles. \nThis method is applicable to any kind of variable.\n\nIt can keep the extreme value, if you choose constant extrapolation method. But then you will face the risk\nthat the extreme value is an error.\n \n \\strong{gqm}\n \nGamma Quantile Mapping. This method is described in Piani et al. 2010 and is applicable only to precipitation. 
It is based on the initial assumption that both observed\nand simulated intensity distributions are well approximated by the gamma distribution, therefore is a parametric q-q map \nthat uses the theorical instead of the empirical distribution. \n \nIt can somehow filter some extreme values caused by errors, while keep the extreme value. Seems more reasonable.\nBetter have a long period of training, and the if the forecast system is relatively stable.\n\nIt is a generic function, if in your case you need to debug, please see \\code{?debug()} \nfor how to debug S4 method.\n}\n\\examples{\n\n######## hyfo grid file biascorrection\n########\n\n# If your input is obtained by \\\\code{loadNcdf}, you can also directly biascorrect\n# the file.\n\n# First load ncdf file.\nfilePath <- system.file(\"extdata\", \"tnc.nc\", package = \"hyfo\")\nvarname <- getNcdfVar(filePath) \nnc <- loadNcdf(filePath, varname)\n\ndata(tgridData)\n# Since the example data, has some NA values, the process will include some warning #message, \n# which can be ignored in this case.\n\n\n\n\n# Then we will use nc data as forecasting data, and use itself as hindcast data,\n# use tgridData as observation.\nnewFrc <- biasCorrect(nc, nc, tgridData) \nnewFrc <- biasCorrect(nc, nc, tgridData, scaleType = 'add') \nnewFrc <- biasCorrect(nc, nc, tgridData, method = 'eqm', extrapolate = 'constant', \npreci = TRUE) \nnewFrc <- biasCorrect(nc, nc, tgridData, method = 'gqm', preci = TRUE) \n\n\n######## Time series biascorrection\n########\n\n# Use the time series from testdl as an example, we take frc, hindcast and obs from testdl.\ndata(testdl)\n\n# common period has to be extracted in order to better train the forecast.\n\ndatalist <- extractPeriod(testdl, startDate = '1994-1-1', endDate = '1995-10-1')\n\nfrc <- datalist[[1]]\nhindcast <- datalist[[2]]\nobs <- datalist[[3]]\n\n\n# The data used here is just for example, so there could be negative data.\n\n# default method is scaling, with 'multi' 
scaleType\nfrc_new <- biasCorrect(frc, hindcast, obs)\n\n# for precipitation data, extra process needs to be executed, so you have to tell\n# the program that it is a precipitation data.\n\nfrc_new1 <- biasCorrect(frc, hindcast, obs, preci = TRUE)\n\n# You can use other scaling methods to biascorrect.\nfrc_new2 <- biasCorrect(frc, hindcast, obs, scaleType = 'add')\n\n# \nfrc_new3 <- biasCorrect(frc, hindcast, obs, method = 'eqm', preci = TRUE)\nfrc_new4 <- biasCorrect(frc, hindcast, obs, method = 'gqm', preci = TRUE)\n\nplotTS(obs, frc, frc_new, frc_new1, frc_new2, frc_new3, frc_new4, plot = 'cum')\n\n# You can also give name to this input list.\nTSlist <- list(obs, frc, frc_new, frc_new1, frc_new2, frc_new3, frc_new4)\nnames(TSlist) <- c('obs', 'frc', 'delta', 'delta_preci', 'scale', 'eqm', 'gqm')\nplotTS(list = TSlist, plot = 'cum')\n\n\n# If the forecasts you extracted only has incontinuous data for certain months and years, e.g.,\n# for seasonal forecasting, forecasts only provide 3-6 months data, so the case can be \n# for example Dec, Jan and Feb of every year from year 1999-2005.\n# In such case, you need to extract certain months and years from observed time series.\n# extractPeriod() can be then used.\n \n \n\n\n\n# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/\n\n\n}\n\\author{\nYuanchao Xu \\email{xuyuanchao37@gmail.com }\n}\n\\references{\nBias correction methods come from \\code{biasCorrection} from \\code{dowscaleR}\n\n\\itemize{\n\n\\item Santander Meteorology Group (2015). downscaleR: Climate data manipulation and statistical downscaling. R\npackage version 0.6-0. https://github.com/SantanderMetGroup/downscaleR/wiki\n\n\\item R.A.I. Wilcke, T. Mendlik and A. Gobiet (2013) Multi-variable error correction of regional climate models. Climatic Change, 120, 871-887\n\n\\item A. Amengual, V. Homar, R. Romero, S. Alonso, and C. 
Ramis (2012) A Statistical Adjustment of Regional Climate Model Outputs to Local Scales: Application to Platja de Palma, Spain. J. Clim., 25, 939-957\n\n\\item C. Piani, J. O. Haerter and E. Coppola (2009) Statistical bias correction for daily precipitation in regional climate models over Europe, Theoretical and Applied Climatology, 99, 187-192\n\n\\item O. Gutjahr and G. Heinemann (2013) Comparing precipitation bias correction methods for high-resolution regional climate simulations using COSMO-CLM, Theoretical and Applied Climatology, 114, 511-529\n}\n}\n\n", + "created" : 1483875745269.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "3811662291", + "id" : "FFE783F", + "lastKnownWriteTime" : 1488018205, + "last_content_update" : 1488018205, + "path" : "~/GitHub/hyfo/man/biasCorrect.Rd", + "project_path" : "man/biasCorrect.Rd", + "properties" : { + }, + "relative_order" : 1, + "source_on_save" : false, + "source_window" : "", + "type" : "r_doc" +} \ No newline at end of file diff --git a/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/lock_file b/.Rproj.user/D1D10CF6/sdb/s-DA33EA29/lock_file new file mode 100644 index 0000000..e69de29 diff --git a/.Rproj.user/D1D10CF6/session-persistent-state b/.Rproj.user/D1D10CF6/session-persistent-state new file mode 100644 index 0000000..18e80d5 --- /dev/null +++ b/.Rproj.user/D1D10CF6/session-persistent-state @@ -0,0 +1 @@ +virtual-session-id="E90651FF" diff --git a/.Rproj.user/D53FD3E6/pcs/find-in-files.pper b/.Rproj.user/D53FD3E6/pcs/find-in-files.pper index 941ba9c..3bb4cdd 100644 --- a/.Rproj.user/D53FD3E6/pcs/find-in-files.pper +++ b/.Rproj.user/D53FD3E6/pcs/find-in-files.pper @@ -4,7 +4,7 @@ "filePatterns" : [ ], "path" : "E:/1/R/hyfo/R", - "query" : "grepAndmatch", + "query" : "pre", "regex" : false } } \ No newline at end of file diff --git a/.Rproj.user/D53FD3E6/pcs/source-pane.pper b/.Rproj.user/D53FD3E6/pcs/source-pane.pper index 70829f6..1743e40 100644 --- a/.Rproj.user/D53FD3E6/pcs/source-pane.pper 
+++ b/.Rproj.user/D53FD3E6/pcs/source-pane.pper @@ -1,3 +1,3 @@ { - "activeTab" : 1 + "activeTab" : 0 } \ No newline at end of file diff --git a/.Rproj.user/D53FD3E6/sdb/per/t/166D8D14 b/.Rproj.user/D53FD3E6/sdb/per/t/166D8D14 deleted file mode 100644 index cf5b1e7..0000000 --- a/.Rproj.user/D53FD3E6/sdb/per/t/166D8D14 +++ /dev/null @@ -1,18 +0,0 @@ -{ - "contents" : "\n\n\n#' Biascorrect the input timeseries or hyfo dataset\n#' \n#' Biascorrect the input time series or dataset, the input time series or dataset should consist of observation, hindcast, and forecast.\n#' observation and hindcast should belong to the same period, in order to calibrate. Then the modified forecast\n#' will be returned. If the input is a time series, first column should be date column and rest columns should be \n#' the value column. If the input is a hyfo dataset, the dataset should be the result of \\code{loadNcdf}, or a list\n#' file with the same format. \n#' \n#' \n#' @param frc a hyfo grid data output or a dataframe (time series) consists of Date column and one or more value columns, \n#' representing the forecast to be calibrated.\n#' @param hindcast a hyfo grid data output or a dataframe(time series) consists of Date column and one or more value columns, \n#' representing the hindcast data. This data will be used in the calibration of the forecast, so it's better to have the same date period as\n#' observation data. Check details for more information.\n#' @param obs a hyfo grid data output or a dataframe (time series) consists of Date column and one or more value columns, \n#' representing the observation data.\n#' @param method bias correct method, including 'delta', 'scaling'..., default is 'scaling'\n#' @param scaleType only when the method \"scaling\" is chosen, scaleType will be available. Two different types\n#' of scaling method, 'add' and 'multi', which means additive and multiplicative scaling method. More info check \n#' details. 
Default scaleType is 'multi'.\n#' @param preci If the precipitation is biascorrected, then you have to assign \\code{preci = TRUE}. Since for\n#' precipitation, some biascorrect methods may not apply to, or some methods are specially for precipitation. \n#' Default is FALSE, refer to details.\n#' @param prThreshold The minimum value that is considered as a non-zero precipitation. Default to 1 (assuming mm).\n#' @param extrapolate When use 'eqm' method, and 'no' is set, modified frc is bounded by the range of obs.\n#' If 'constant' is set, modified frc is not bounded by the range of obs. Default is 'no'.\n#' @details \n#' \n#' Since climate forecast is based on global condition, when downscaling to different regions, it may include\n#' some bias, biascorrection is used then to fix the bias.\n#' \n#' \\strong{Hindcast}\n#' \n#' In order to bias correct, we need to pick up some data from the forecast to train with\n#' the observation, which is called hindcast in this function. Using hindcast and observation, \n#' the program can analyze the bias and correct the bias in the forecast. \n#' \n#' Hindcast should have \\strong{EVERY} attributes that forecast has.\n#' \n#' Hindcast is also called re-forecast, is the forecast of the past. E.g. you have a forecast from year 2000-2010, assuming now you are in 2005. So from 2000-2005, this period\n#' is the hindcast period, and 2005-2010, this period is the forecast period.\n#'\n#' Hindcast can be the same as forecast, i.e., you can use forecast itself as hindcast to train the bias correction.\n#'\n#'\n#' \\strong{How it works}\n#' \n#' Forecast product has to be calibrated, usually the system is doing forecast in real time. So, e.g., if the \n#' forecast starts from year 2000, assuming you are in year 2003, then you will have 3 years' hindcast \n#' data (year 2000-2003), which can be used to calibrate. And your forecast period is (2003-2004)\n#' \n#' E.g. you have observation from 2001-2002, this is your input obs. 
Then you can take the same \n#' period (2001-2002) from the forecast, which is the hindcast period. For forecast, you can take any period.\n#' The program will evaluate the obs and hindcast, to get the modification of the forecast, and then add the \n#' modification to the forecast data.\n#' \n#' The more categorized input, the more accurate result you will get. E.g., if you want to \n#' bias correct a forecast for winter season. So you'd better to extract all the winter period\n#' in the hindcast and observation to train. \\code{extractPeriod} can be used for this purpose.\n#' \n#' \\strong{method}\n#' \n#' Different methods used in the bias correction. Among which, delta, scaling can be applied\n#' to different kinds of parameters, with no need to set \\code{preci}; eqm has two conditions for rainfall data and other data,\n#' it needs user to input \\code{preci = TRUE/FALSE} to point to different conditions; gqm is\n#' designed for rainfall data, so \\code{preci = TRUE} needs to be set.\n#' \n#' \\strong{delta}\n#' \n#' This method consists on adding to the observations the mean change signal (delta method). \n#' This method is applicable to any kind of variable but it is preferable to avoid it for bounded variables\n#' (e.g. precipitation, wind speed, etc.) because values out of the variable range could be obtained \n#' (e.g. negative wind speeds...)\n#' \n#' \\strong{scaling}\n#' \n#' This method consists on scaling the simulation with the difference (additive) or quotient (multiplicative) \n#' between the observed and simulated means in the train period. The \\code{additive} or \\code{multiplicative}\n#' correction is defined by parameter \\code{scaling.type} (default is \\code{additive}).\n#' The additive version is preferably applicable to unbounded variables (e.g. temperature) \n#' and the multiplicative to variables with a lower bound (e.g. precipitation, because it also preserves the frequency). \n#' \n#' \\strong{eqm}\n#' \n#' Empirical Quantile Mapping. 
This is a very extended bias correction method which consists on calibrating the simulated Cumulative Distribution Function (CDF) \n#' by adding to the observed quantiles both the mean delta change and the individual delta changes in the corresponding quantiles. \n#' This method is applicable to any kind of variable.\n#' \n#' It can keep the extreme value, if you choose constant extrapolation method. But then you will face the risk\n#' that the extreme value is an error.\n#' \n#' \\strong{gqm}\n#' \n#' Gamma Quantile Mapping. This method is described in Piani et al. 2010 and is applicable only to precipitation. It is based on the initial assumption that both observed\n#' and simulated intensity distributions are well approximated by the gamma distribution, therefore is a parametric q-q map \n#' that uses the theorical instead of the empirical distribution. \n#' \n#' It can somehow filter some extreme values caused by errors, while keep the extreme value. Seems more reasonable.\n#' Better have a long period of training, and the if the forecast system is relatively stable.\n#' \n#' It is a generic function, if in your case you need to debug, please see \\code{?debug()} \n#' for how to debug S4 method.\n#' \n#' @examples \n#' \n#' ######## hyfo grid file biascorrection\n#' ########\n#' \n#' # If your input is obtained by \\code{loadNcdf}, you can also directly biascorrect\n#' # the file.\n#' \n#' # First load ncdf file.\n#' filePath <- system.file(\"extdata\", \"tnc.nc\", package = \"hyfo\")\n#' varname <- getNcdfVar(filePath) \n#' nc <- loadNcdf(filePath, varname)\n#' \n#' data(tgridData)\n#' # Since the example data, has some NA values, the process will include some warning #message, \n#' # which can be ignored in this case.\n#' \n#' \n#' \n#' \n#' # Then we will use nc data as forecasting data, and use itself as hindcast data,\n#' # use tgridData as observation.\n#' newFrc <- biasCorrect(nc, nc, tgridData) \n#' newFrc <- biasCorrect(nc, nc, tgridData, scaleType = 
'add') \n#' newFrc <- biasCorrect(nc, nc, tgridData, method = 'eqm', extrapolate = 'constant', \n#' preci = TRUE) \n#' newFrc <- biasCorrect(nc, nc, tgridData, method = 'gqm', preci = TRUE) \n#' \n#' \n#' ######## Time series biascorrection\n#' ########\n#' \n#' # Use the time series from testdl as an example, we take frc, hindcast and obs from testdl.\n#' data(testdl)\n#' \n#' # common period has to be extracted in order to better train the forecast.\n#' \n#' datalist <- extractPeriod(testdl, startDate = '1994-1-1', endDate = '1995-10-1')\n#' \n#' frc <- datalist[[1]]\n#' hindcast <- datalist[[2]]\n#' obs <- datalist[[3]]\n#' \n#' \n#' # The data used here is just for example, so there could be negative data.\n#' \n#' # default method is scaling, with 'multi' scaleType\n#' frc_new <- biasCorrect(frc, hindcast, obs)\n#' \n#' # for precipitation data, extra process needs to be executed, so you have to tell\n#' # the program that it is a precipitation data.\n#' \n#' frc_new1 <- biasCorrect(frc, hindcast, obs, preci = TRUE)\n#' \n#' # You can use other scaling methods to biascorrect.\n#' frc_new2 <- biasCorrect(frc, hindcast, obs, scaleType = 'add')\n#' \n#' # \n#' frc_new3 <- biasCorrect(frc, hindcast, obs, method = 'eqm', preci = TRUE)\n#' frc_new4 <- biasCorrect(frc, hindcast, obs, method = 'gqm', preci = TRUE)\n#' \n#' plotTS(obs, frc, frc_new, frc_new1, frc_new2, frc_new3, frc_new4, plot = 'cum')\n#' \n#' # You can also give name to this input list.\n#' TSlist <- list(obs, frc, frc_new, frc_new1, frc_new2, frc_new3, frc_new4)\n#' names(TSlist) <- c('obs', 'frc', 'delta', 'delta_preci', 'scale', 'eqm', 'gqm')\n#' plotTS(list = TSlist, plot = 'cum')\n#' \n#' \n#' # If the forecasts you extracted only has incontinuous data for certain months and years, e.g.,\n#' # for seasonal forecasting, forecasts only provide 3-6 months data, so the case can be \n#' # for example Dec, Jan and Feb of every year from year 1999-2005.\n#' # In such case, you need to extract certain 
months and years from observed time series.\n#' # extractPeriod() can be then used.\n#' \n#' \n#'\n#'\n#'\n#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/\n#' \n#' \n#' @references \n#' Bias correction methods come from \\code{biasCorrection} from \\code{dowscaleR}\n#' \n#' \\itemize{\n#' \n#' \\item Santander Meteorology Group (2015). downscaleR: Climate data manipulation and statistical downscaling. R\n#' package version 0.6-0. https://github.com/SantanderMetGroup/downscaleR/wiki\n#' \n#' \\item R.A.I. Wilcke, T. Mendlik and A. Gobiet (2013) Multi-variable error correction of regional climate models. Climatic Change, 120, 871-887\n#' \n#' \\item A. Amengual, V. Homar, R. Romero, S. Alonso, and C. Ramis (2012) A Statistical Adjustment of Regional Climate Model Outputs to Local Scales: Application to Platja de Palma, Spain. J. Clim., 25, 939-957\n#' \n#' \\item C. Piani, J. O. Haerter and E. Coppola (2009) Statistical bias correction for daily precipitation in regional climate models over Europe, Theoretical and Applied Climatology, 99, 187-192\n#' \n#' \\item O. Gutjahr and G. 
Heinemann (2013) Comparing precipitation bias correction methods for high-resolution regional climate simulations using COSMO-CLM, Theoretical and Applied Climatology, 114, 511-529\n#' }\n#' \n#' @author Yuanchao Xu \\email{xuyuanchao37@@gmail.com }\n#' @importFrom methods setMethod\n#' @export\n#' \nsetGeneric('biasCorrect', function(frc, hindcast, obs, method = 'scaling', scaleType = 'multi', \n preci = FALSE, prThreshold = 0, extrapolate = 'no') {\n standardGeneric('biasCorrect')\n})\n\n#' @describeIn biasCorrect\nsetMethod('biasCorrect', signature('data.frame', 'data.frame', 'data.frame'),\n function(frc, hindcast, obs, method, scaleType, preci, prThreshold, extrapolate) {\n result <- biasCorrect.TS(frc, hindcast, obs, method, scaleType, preci, prThreshold, extrapolate)\n return(result)\n })\n\n#' @describeIn biasCorrect\nsetMethod('biasCorrect', signature('list', 'list', 'list'), \n function(frc, hindcast, obs, method, scaleType, preci, prThreshold, extrapolate) {\n result <- biasCorrect.list(frc, hindcast, obs, method, scaleType, preci, prThreshold, extrapolate)\n return(result)\n })\n\n\nbiasCorrect.TS <- function(frc, hindcast, obs, method, scaleType, preci, prThreshold, extrapolate) {\n # First check if the first column is Date\n if (!grepl('-|/', obs[1, 1]) | !grepl('-|/', hindcast[1, 1]) | !grepl('-|/', frc[1, 1])) {\n stop('First column is not date or Wrong Date formate, check the format in ?as.Date{base} \n and use as.Date to convert.If your input is a hyfo dataset, put input = \"hyfo\" as an\n argument, check help for more info.')\n }\n # change to date type is easier, but in case in future the flood part is added, Date type doesn't have\n # hour, min and sec, so, it's better to convert it into POSIxlt.\n \n # if condition only accepts one condition, for list comparison, there are a lot of conditions, better\n # further process it, like using any.\n if (any(as.POSIXlt(hindcast[, 1]) != as.POSIXlt(obs[, 1]))) {\n warning('time of obs and time of 
hindcast are not the same, which may cause inaccuracy in \n the calibration.')\n }\n n <- ncol(frc)\n \n # For every column, it's biascorrected respectively.\n frc_data <- lapply(2:n, function(x) biasCorrect_core(frc[, x], hindcast[, x], obs[, 2], method = method,\n scaleType = scaleType, preci = preci, prThreshold = prThreshold, \n extrapolate = extrapolate))\n frc_data <- do.call('cbind', frc_data)\n rownames(frc_data) <- NULL\n \n names <- colnames(frc)\n frc_new <- data.frame(frc[, 1], frc_data)\n colnames(frc_new) <- names\n return(frc_new)\n}\n\nbiasCorrect.list <- function(frc, hindcast, obs, method, scaleType, preci, prThreshold, extrapolate) {\n ## Check if the data is a hyfo grid data.\n checkHyfo(frc, hindcast, obs)\n \n hindcastData <- hindcast$Data\n obsData <- obs$Data\n frcData <- frc$Data\n \n ## save frc dimension order, at last, set the dimension back to original dimension\n frcDim <- attributes(frcData)$dimensions\n \n ## ajust the dimension into general dimension order.\n hindcastData <- adjustDim(hindcastData, ref = c('lon', 'lat', 'time'))\n obsData <- adjustDim(obsData, ref = c('lon', 'lat', 'time'))\n \n ## CheckDimLength, check if all the input dataset has different dimension length\n # i.e. if they all have the same lon and lat number.\n checkDimLength(frcData, hindcastData, obsData, dim = c('lon', 'lat'))\n \n \n # Now real bias correction is executed.\n \n memberIndex <- grepAndMatch('member', attributes(frcData)$dimensions)\n \n # For dataset that has a member part \n if (length(memberIndex) != 0) {\n # check if frcData and hindcastData has the same dimension and length.\n checkDimLength(frcData, hindcastData, dim = 'member')\n \n frcData <- adjustDim(frcData, ref = c('lon', 'lat', 'time', 'member'))\n \n # The following code may speed up because it doesn't use for loop.\n # It firstly combine different array into one array. combine the time \n # dimension of frc, hindcast and obs. 
Then use apply, each time extract \n # the total time dimension, and first part is frc, second is hindcast, third\n # is obs. Then use these three parts to bias correct. All above can be written\n # in one function and called within apply. But too complicated to understand,\n # So save it for future use maybe.\n \n # for (member in 1:dim(frcData)[4]) {\n # totalArr <- array(c(frcData[,,, member], hindcastData[,,, member], obsData),\n # dim = c(dim(frcData)[1], dim(frcData)[2], \n # dim(frcData)[3] + dim(hindcastData)[3] + dim(obsData)[3]))\n # }\n \n \n for (member in 1:dim(frcData)[4]) {\n for (lon in 1:dim(frcData)[1]) {\n for (lat in 1:dim(frcData)[2]) {\n frcData[lon, lat,, member] <- biasCorrect_core(frcData[lon, lat,,member], hindcastData[lon, lat,, member], obsData[lon, lat,], method = method,\n scaleType = scaleType, preci = preci, prThreshold = prThreshold, \n extrapolate = extrapolate)\n }\n }\n }\n } else {\n frcData <- adjustDim(frcData, ref = c('lon', 'lat', 'time'))\n for (lon in 1:dim(frcData)[1]) {\n for (lat in 1:dim(frcData)[2]) {\n frcData[lon, lat,] <- biasCorrect_core(frcData[lon, lat,], hindcastData[lon, lat,], obsData[lon, lat,], method = method,\n scaleType = scaleType, preci = preci, prThreshold = prThreshold, \n extrapolate = extrapolate)\n }\n }\n }\n \n frcData <- adjustDim(frcData, ref = frcDim)\n frc$Data <- frcData\n frc$biasCorrected_by <- method\n frc_new <- frc\n return(frc_new)\n}\n\n\n\n\n\n\n#' @importFrom MASS fitdistr\n#' @importFrom stats ecdf quantile pgamma qgamma rgamma\n#' \n#' @references \n#' Bias correction methods come from \\code{biasCorrection} from \\code{dowscaleR}\n#' \n#' \\itemize{\n#' \n#' \\item Santander Meteorology Group (2015). downscaleR: Climate data manipulation and statistical downscaling. R\n#' package version 0.6-0. https://github.com/SantanderMetGroup/downscaleR/wiki\n#' \n#' \\item R.A.I. Wilcke, T. Mendlik and A. Gobiet (2013) Multi-variable error correction of regional climate models. 
Climatic Change, 120, 871-887\n#' \n#' \\item A. Amengual, V. Homar, R. Romero, S. Alonso, and C. Ramis (2012) A Statistical Adjustment of Regional Climate Model Outputs to Local Scales: Application to Platja de Palma, Spain. J. Clim., 25, 939-957\n#' \n#' \\item C. Piani, J. O. Haerter and E. Coppola (2009) Statistical bias correction for daily precipitation in regional climate models over Europe, Theoretical and Applied Climatology, 99, 187-192\n#' \n#' \\item O. Gutjahr and G. Heinemann (2013) Comparing precipitation bias correction methods for high-resolution regional climate simulations using COSMO-CLM, Theoretical and Applied Climatology, 114, 511-529\n#' }\n#' \n#' \n#' \n# this is only used to calculate the value column, \nbiasCorrect_core <- function(frc, hindcast, obs, method, scaleType, preci, prThreshold, extrapolate){\n # If the variable is precipitation, some further process needs to be added.\n # The process is taken from downscaleR, to provide a more reasonable hindcast, used in the calibration.\n \n \n # check if frc, hindcast or obs are all na values\n if (!any(!is.na(obs)) | !any(!is.na(frc)) | !any(!is.na(hindcast))) {\n warning('In this cell, frc, hindcast or obs data is missing. 
No biasCorrection for this cell.')\n return(NA)\n }\n \n \n if (preci == TRUE) {\n preprocessHindcast_res <- preprocessHindcast(hindcast = hindcast, obs = obs, prThreshold = prThreshold)\n hindcast <- preprocessHindcast_res[[1]]\n minHindcastPreci <- preprocessHindcast_res[[2]]\n }\n \n # default is the simplest method in biascorrection, just do simple addition and subtraction.\n if (method == 'delta') {\n if (length(frc) != length(obs)) stop('This method needs frc data have the same length as obs data.')\n # comes from downscaleR biascorrection method\n frcMean <- mean(frc, na.rm = TRUE)\n hindcastMean <- mean(hindcast, na.rm = TRUE)\n frc <- obs - hindcastMean + frcMean\n \n } else if (method == 'scaling') {\n obsMean <- mean(obs, na.rm = TRUE)\n hindcastMean <- mean(hindcast, na.rm = TRUE)\n \n if (scaleType == 'multi') {\n frc <- frc / hindcastMean * obsMean\n \n } else if (scaleType == 'add') {\n frc <- frc - hindcastMean + obsMean\n }\n \n \n } else if (method == 'eqm') {\n if (preci == FALSE) {\n frc <- biasCorrect_core_eqm_nonPreci(frc, hindcast, obs, extrapolate, prThreshold)\n } else {\n frc <- biasCorrect_core_eqm_preci(frc, hindcast, obs, minHindcastPreci, extrapolate,\n prThreshold)\n }\n \n } else if (method == 'gqm') {\n if (preci == FALSE) stop ('gqm method only applys to precipitation, please set preci = T')\n frc <- biasCorrect_core_gqm(frc, hindcast, obs, prThreshold, minHindcastPreci)\n }\n \n \n return(frc)\n}\n\n\n#' @importFrom MASS fitdistr\n#' @importFrom stats rgamma\npreprocessHindcast <- function(hindcast, obs, prThreshold) {\n lowerIndex <- length(which(obs < prThreshold))\n \n # In the original function, this minHindcastPreci is Pth[,i,j] in downscaleR, and it is originally\n # set to NA, which is not so appropriate for all the precipitations.\n # In the original function, there are only two conditions, 1. all the obs less than threshold\n # 2. there are some obs less than threshold. 
\n # While, if we set threshold to 0, there could be a 3rd condition, all the obs no less than threshold.\n # Here I set this situation, firstly set minHindcastPreci to the min of the hindcast. Because in future\n # use, 'eqm' method is going to use this value.\n \n # The problem above has been solved.\n \n \n if (lowerIndex >= 0 & lowerIndex < length(obs)) {\n index <- sort(hindcast, decreasing = FALSE, na.last = NA, index.return = TRUE)$ix\n hindcast_sorted <- sort(hindcast, decreasing = FALSE, na.last = NA)\n # minHindcastPreci is the min preci over threshold FOR ***HINDCAST***\n # But use obs to get the lowerIndex, so obs_sorted[lowerIndex + 1] > prThreshold, but\n # hindcast_sorted[lowerIndex + 1] may greater than or smaller than ptThreshold\n \n \n # It would be better to understand if you draw two lines: hindcast_sorted and obs_sorted\n # with y = prThreshold, you will find the difference of the two.\n \n # In principle, the value under the threshold needs to be replaced by some other reasonable value.\n # simplest way \n minHindcastPreci <- hindcast_sorted[lowerIndex + 1]\n \n # Also here if minHindcastPreci is 0 and prThreshold is 0, will cause problem, bettter set \n # I set it prThreshold != 0 \n if (minHindcastPreci <= prThreshold & prThreshold != 0) {\n obs_sorted <- sort(obs, decreasing = FALSE, na.last = NA)\n \n # higherIndex is based on hindcast\n higherIndex <- which(hindcast_sorted > prThreshold & !is.na(hindcast_sorted))\n \n if (length(higherIndex) == 0) {\n higherIndex <- max(which(!is.na(hindcast_sorted)))\n higherIndex <- min(length(obs_sorted), higherIndex)\n } else {\n higherIndex <- min(higherIndex)\n }\n # here I don't know why choose 6.\n # Written # [Shape parameter Scale parameter] in original package\n # according to the reference and gamma distribution, at least 6 values needed to fit gamma\n # distribution.\n if (length(unique(obs_sorted[(lowerIndex + 1):higherIndex])) < 6) {\n hindcast_sorted[(lowerIndex + 1):higherIndex] <- 
mean(obs_sorted[(lowerIndex + 1):higherIndex], \n na.rm = TRUE)\n } else {\n obsGamma <- fitdistr(obs_sorted[(lowerIndex + 1):higherIndex], \"gamma\")\n \n # this is to replace the original hindcast value between lowerIndex and higherIndex with \n # some value taken from gamma distribution just generated.\n hindcast_sorted[(lowerIndex + 1):higherIndex] <- rgamma(higherIndex - lowerIndex, obsGamma$estimate[1], \n rate = obsGamma$estimate[2])\n }\n hindcast_sorted <- sort(hindcast_sorted, decreasing = FALSE, na.last = NA)\n \n } \n minIndex <- min(lowerIndex, length(hindcast))\n hindcast_sorted[1:minIndex] <- 0\n hindcast[index] <- hindcast_sorted\n \n } else if (lowerIndex == length(obs)) {\n \n index <- sort(hindcast, decreasing = FALSE, na.last = NA, index.return = TRUE)$ix\n hindcast_sorted <- sort(hindcast, decreasing = FALSE, na.last = NA)\n minHindcastPreci <- hindcast_sorted[lowerIndex]\n \n # here is to compare with hindcast, not obs\n minIndex <- min(lowerIndex, length(hindcast))\n hindcast_sorted[1:minIndex] <- 0\n hindcast[index] <- hindcast_sorted\n \n }\n return(list(hindcast, minHindcastPreci))\n}\n\nbiasCorrect_core_eqm_nonPreci <- function(frc, hindcast, obs, extrapolate, prThreshold) {\n ecdfHindcast <- ecdf(hindcast)\n \n if (extrapolate == 'constant') {\n higherIndex <- which(frc > max(hindcast, na.rm = TRUE))\n lowerIndex <- which(frc < min(hindcast, na.rm = TRUE))\n \n extrapolateIndex <- c(higherIndex, lowerIndex)\n non_extrapolateIndex <- setdiff(1:length(frc), extrapolateIndex)\n \n # In case extrapolateIndex is of length zero, than extrapolate cannot be used afterwards\n # So use setdiff(1:length(sim), extrapolateIndex), if extrapolateIndex == 0, than it will\n # return 1:length(sim)\n \n if (length(higherIndex) > 0) {\n maxHindcast <- max(hindcast, na.rm = TRUE)\n dif <- maxHindcast - max(obs, na.rm = TRUE)\n frc[higherIndex] <- frc[higherIndex] - dif\n }\n \n if (length(lowerIndex) > 0) {\n minHindcast <- min(hindcast, na.rm = TRUE)\n dif 
<- minHindcast - min(obs, nna.rm = TRUE)\n frc[lowerIndex] <- frc[lowerIndex] - dif\n }\n \n frc[non_extrapolateIndex] <- quantile(obs, probs = ecdfHindcast(frc[non_extrapolateIndex]), \n na.rm = TRUE, type = 4)\n } else {\n frc <- quantile(obs, probs = ecdfHindcast(frc), na.rm = TRUE, type = 4)\n }\n return(frc)\n}\n\nbiasCorrect_core_eqm_preci <- function(frc, hindcast, obs, minHindcastPreci, extrapolate, \n prThreshold) {\n \n # Most of time this condition seems useless because minHindcastPreci comes from hindcast, so there will be\n # always hindcast > minHindcastPreci exists.\n # Unless one condition that minHindcastPreci is the max in the hindcast, than on hindcast > minHindcastPreci\n if (length(which(hindcast > minHindcastPreci)) > 0) {\n \n ecdfHindcast <- ecdf(hindcast[hindcast > minHindcastPreci])\n \n noRain <- which(frc <= minHindcastPreci & !is.na(frc))\n rain <- which(frc > minHindcastPreci & !is.na(frc))\n \n # drizzle is to see whether there are some precipitation between the min frc (over threshold) and \n # min hindcast (over threshold).\n drizzle <- which(frc > minHindcastPreci & frc <= min(hindcast[hindcast > minHindcastPreci], na.rm = TRUE) \n & !is.na(frc))\n \n if (length(rain) > 0) {\n ecdfFrc <- ecdf(frc[rain])\n \n if (extrapolate == 'constant') {\n \n # This higher and lower index mean the extrapolation part\n higherIndex <- which(frc[rain] > max(hindcast, na.rm = TRUE))\n lowerIndex <- which(frc[rain] < min(hindcast, na.rm = TRUE))\n \n extrapolateIndex <- c(higherIndex, lowerIndex)\n non_extrapolateIndex <- setdiff(1:length(rain), extrapolateIndex)\n \n if (length(higherIndex) > 0) {\n maxHindcast <- max(hindcast, na.rm = TRUE)\n dif <- maxHindcast - max(obs, na.rm = TRUE)\n frc[rain[higherIndex]] <- frc[higherIndex] - dif\n }\n \n if (length(lowerIndex) > 0) {\n minHindcast <- min(hindcast, na.rm = TRUE)\n dif <- minHindcast - min(obs, nna.rm = TRUE)\n frc[rain[lowerIndex]] <- frc[lowerIndex] - dif\n }\n \n # Here the original 
function doesn't accout for the situation that extraploateIndex is 0\n # if it is 0, rain[-extraploateIndex] would be nothing\n \n # Above has been solved by using setdiff.\n frc[rain[non_extrapolateIndex]] <- quantile(obs[which(obs > prThreshold & !is.na(obs))], \n probs = ecdfHindcast(frc[rain[non_extrapolateIndex]]), \n na.rm = TRUE, type = 4)\n } else {\n \n frc[rain] <- quantile(obs[which(obs > prThreshold & !is.na(obs))], \n probs = ecdfHindcast(frc[rain]), na.rm = TRUE, type = 4)\n }\n }\n if (length(drizzle) > 0){\n \n # drizzle part is a seperate part. it use the ecdf of frc (larger than minHindcastPreci) to \n # biascorrect the original drizzle part\n frc[drizzle] <- quantile(frc[which(frc > min(hindcast[which(hindcast > minHindcastPreci)], na.rm = TRUE) & \n !is.na(frc))], probs = ecdfFrc(frc[drizzle]), na.rm = TRUE, \n type = 4)\n }\n \n frc[noRain] <- 0\n \n } else {\n # in this condition minHindcastPreci is the max of hindcast, so all hindcast <= minHindcastPreci\n # And frc distribution is used then.\n noRain <- which(frc <= minHindcastPreci & !is.na(frc))\n rain <- which(frc > minHindcastPreci & !is.na(frc))\n \n if (length(rain) > 0) {\n ecdfFrc <- ecdf(frc[rain])\n frc[rain] <- quantile(obs[which(obs > prThreshold & !is.na(obs))], probs = ecdfFrc(frc[rain]), \n na.rm = TRUE, type = 4)\n }\n frc[noRain]<-0\n }\n return(frc)\n}\n\nbiasCorrect_core_gqm <- function(frc, hindcast, obs, prThreshold, minHindcastPreci) {\n if (any(obs > prThreshold)) {\n \n ind <- which(obs > prThreshold & !is.na(obs))\n obsGamma <- fitdistr(obs[ind],\"gamma\")\n ind <- which(hindcast > 0 & !is.na(hindcast))\n hindcastGamma <- fitdistr(hindcast[ind],\"gamma\")\n rain <- which(frc > minHindcastPreci & !is.na(frc))\n noRain <- which(frc <= minHindcastPreci & !is.na(frc))\n \n probF <- pgamma(frc[rain], hindcastGamma$estimate[1], rate = hindcastGamma$estimate[2])\n frc[rain] <- qgamma(probF,obsGamma$estimate[1], rate = obsGamma$estimate[2])\n frc[noRain] <- 0\n } else {\n 
warning('All the observations of this cell(station) are lower than the threshold, \n no bias correction applied.')\n }\n return(frc)\n }\n", - "created" : 1449959816305.000, - "dirty" : false, - "encoding" : "ASCII", - "folds" : "", - "hash" : "4278190572", - "id" : "166D8D14", - "lastKnownWriteTime" : 1452416339, - "path" : "E:/1/R/hyfo/R/biasCorrect(generic).R", - "project_path" : "R/biasCorrect(generic).R", - "properties" : { - "tempName" : "Untitled1" - }, - "relative_order" : 9, - "source_on_save" : false, - "type" : "r_source" -} \ No newline at end of file diff --git a/.Rproj.user/D53FD3E6/sdb/per/t/222F1822 b/.Rproj.user/D53FD3E6/sdb/per/t/222F1822 deleted file mode 100644 index fd02f78..0000000 --- a/.Rproj.user/D53FD3E6/sdb/per/t/222F1822 +++ /dev/null @@ -1,17 +0,0 @@ -{ - "contents" : "hyfo 1.3.6\n==========\nDate: 2015.12.15\n\n- transfer from ncdf to ncdf4\n- grepAndMatch created, for capturing dimension names.\n- minor bug fixed about the loadNcdf, when no dimension found, it will give an error indicating.\n- change most of the match function into grepAndMatch, in order to deal with different dimension names.\n- add name attributes to gridfile$xyCoords$x,y, when writeNcdf, the dim names will be taken from that attribute, which can be exactly the same with the original. \n- bug fixed for nc files without members.\n\nNOTE:\n----\n- for hyfo$Data part, when load and write using ncdf4, there will be very little differences compared to the original, which cannot be addressed. If you first load an ncdf file, then write it, then load it again. 
The data part may have very little difference, less than 10E-5.\n\n\n\nhyfo 1.3.5\n==========\nDate: 2015.12.6\n\n- travis check passed, change the rgdal version from 0.9-3 back to 0.8-16 due to the lack of packages on travis ubuntu.\n- changed .yml file to fix the problem with No repository set, so cyclic dependency check skipped.\n- on CRAN\n- fully supported for windows, Linux and OS.\n\n\n\nhyfo 1.3.3\n==========\nDate: 2015.11.27\n\n- Delete readData_folder, since it's only windows based, add information to get special version for windows users.\n- travis test added.\n\n\nhyfo 1.3.2\n==========\nDate: 2015.11.7\n\n- bug fixed about getPreciBar, signature('data.frame')\n- vignettes updated about bug and hided the warning information.\n- Add how to debug in the documentation for the generic functions.\n\n\n\nhyfo 1.3.1\n==========\nDate: 2015.11.3\n\n- new generic function biasCorrect, extractPeriod, resample, getAnnual, getPreciBar added. No need to designate input type any more, R will detect automatically.\n- coordinates conversion function extracted.\n- new user manual about real time bias correction and resample added.\n\n\n\nhyfo 1.2.9\n==========\nDate: 2015.10.30\n\n- new biasFactor S4 class added, to avoid set the input type every time.\n- operational bias correction has been changed to generic function.\n- news file added.\n\n\n\nhyfo 1.2.8\n==========\nDate: 2015.10.10\n\n- operational bias correction added, in normal function.", - "created" : 1446423165783.000, - "dirty" : false, - "encoding" : "ASCII", - "folds" : "", - "hash" : "4159313557", - "id" : "222F1822", - "lastKnownWriteTime" : 1452416339, - "path" : "E:/1/R/hyfo/NEWS", - "project_path" : "NEWS", - "properties" : { - }, - "relative_order" : 2, - "source_on_save" : false, - "type" : "text" -} \ No newline at end of file diff --git a/.Rproj.user/D53FD3E6/sdb/per/t/39BBA10B b/.Rproj.user/D53FD3E6/sdb/per/t/39BBA10B deleted file mode 100644 index 268b842..0000000 --- 
a/.Rproj.user/D53FD3E6/sdb/per/t/39BBA10B +++ /dev/null @@ -1,17 +0,0 @@ -{ - "contents" : "#' Get variable name of the NetCDF file.\n#' \n#' Get variable name in the NetCDF file. After knowning the name, you can use \\code{loadNcdf} to load\n#' the target variable.\n#' \n#' @param filePath A path pointing to the netCDF file.\n#' @return The names of the varialbes in the file.\n#' @examples \n#' # First open the test NETcDF file.\n#' filePath <- system.file(\"extdata\", \"tnc.nc\", package = \"hyfo\")\n#' \n#' # Then if you don't know the variable name, you can use \\code{getNcdfVar} to get variable name\n#' varname <- getNcdfVar(filePath)\n#' \n#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/\n#' \n#' @import ncdf4\n#' @references \n#' \n#' \\itemize{\n#' \\item David Pierce (2015). ncdf4: Interface to Unidata netCDF (Version 4 or\n#' Earlier) Format Data Files. R package version 1.14.1.\n#' http://CRAN.R-project.org/package=ncdf4\n#' }\n#' \n#' \n#' \n#' @export\ngetNcdfVar <- function(filePath) {\n nc <- nc_open(filePath)\n names <- names(nc$var)\n return(names)\n}\n\n\n#' Load NetCDF file\n#' \n#' @param filePath A path pointing to the NetCDF file, version3.\n#' @param varname A character representing the variable name, you can use \\code{getNcdfVar} to\n#' get the basic information about the variables and select the target.\n#' @param tz A string representing the time zone, default is GMT, if you know what time zone is \n#' you can assign it in the argument. If \\code{tz = ''}, current time zone will be taken.\n# @param drop When the time dimension only have one value, the output data will drop\n# this dimension automatically (\\code{drop = TRUE}), default value is \\code{drop = FALSE}, then time dimension will be added.\n# This argument mainly applies to the later calculations based on hyfo file. If the dimension\n# is dropped, than some calculations may not be processed afterwards. \n#' @param ... 
Several arguments including Year, month, lon, lat \n#' type in \\code{?downscaleNcdf} for details.You can load while downscale, \n#' and also first load than use \\code{downscaleNcdf} to downscale.\n#' @return A list object from \\code{hyfo} containing the information to be used in the analysis, \n#' or biascorrection.\n#' @examples \n#' # First open the test NETcDF file.\n#' filePath <- system.file(\"extdata\", \"tnc.nc\", package = \"hyfo\")\n#' \n#' # Then if you don't know the variable name, you can use \\code{getNcdfVar} to get variable name\n#' varname <- getNcdfVar(filePath)\n#' \n#' nc <- loadNcdf(filePath, varname)\n#' \n#' # you can directly add your downscale information to the argument.\n#' nc1 <- loadNcdf(filePath, varname, year = 2006, lon = c(-2, -0.5), lat = c(43.2, 43.7))\n#' nc2 <- loadNcdf(filePath, varname, year = 2005, month = 3:8, lon = c(-2, -0.5), \n#' lat = c(43.2, 43.7))\n#' \n#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/\n#' \n#' @export\n#' @import ncdf4\n#' @references \n#' \n#' \\itemize{\n#' \\item David Pierce (2015). ncdf4: Interface to Unidata netCDF (Version 4 or\n#' Earlier) Format Data Files. R package version 1.14.1.\n#' http://CRAN.R-project.org/package=ncdf4\n#' \n#' \\item Santander MetGroup (2015). ecomsUDG.Raccess: R interface to the ECOMS User Data Gateway. R package\n#' version 2.2-6. http://meteo.unican.es/ecoms-udg\n#' }\n#' \n#' \nloadNcdf <- function(filePath, varname, tz = 'GMT', ...) 
{\n nc <- nc_open(filePath)\n \n var <- nc$var\n # Use name to locate the variable\n call_1 <- as.call(c(\n list(as.name('$'), var, varname)\n ))\n var <- eval(call_1)\n if(is.null(var)) stop('No such variable name, check source file.')\n \n dimNames <- unlist(lapply(1:length(var$dim), function(x) var$dim[[x]]$name))\n \n # Only deals with the most common dimensions, futher dimensions will be added in future.\n dimIndex <- grepAndMatch(c('lon', 'lat', 'time', 'member'), dimNames)\n if (length(dimIndex) < 3) stop('Your file has less than 3 dimensions.')\n \n # First needs to identify the variable name, load the right data\n message('Loading data...')\n nc_data <- ncvar_get(nc, var)\n message('Processing...')\n \n gridData <- list()\n gridData$Variable$varName <- varname\n gridData$xyCoords$x <- var$dim[[dimIndex[1]]]$vals\n attributes(gridData$xyCoords$x)$name <- dimNames[dimIndex[1]]\n \n gridData$xyCoords$y <- var$dim[[dimIndex[2]]]$vals\n attributes(gridData$xyCoords$y)$name <- dimNames[dimIndex[2]]\n \n # Time part needs to be taken seperately\n \n timeUnit <- strsplit(var$dim[[dimIndex[3]]]$units, split = ' since')[[1]][1]\n timeDiff <- var$dim[[dimIndex[3]]]$vals\n # To get real time, time since when has to be grabbed from the dataset.\n timeSince <- as.POSIXlt(strsplit(var$dim[[dimIndex[3]]]$units, split = 'since')[[1]][2], tz = tz)\n \n \n# Date <- rep(timeSince, length(timeDiff))\n \n \n unitDic <- data.frame(weeks = 'weeks', days = 'days', hours = 'hours',\n minutes = 'mins', seconds = 'secs')\n \n timeDiff <- as.difftime(timeDiff, units = as.character(unitDic[1, timeUnit]))\n \n# if (grepl('day', timeUnit)) {\n# Date$mday <- Date$mday + timeDiff\n# } else if (grepl('second', timeUnit)) {\n# Date$sec <- Date$sec + timeDiff\n# }\n Date <- timeSince + timeDiff\n \n # data directly loaded from ncdf4 will drop the dimension with only one value.\n # the varsize shows the real dimension, without any dropping.\n dim(nc_data) <- var$varsize \n \n # Right now there 
is no need to add end Date, in furture, may be added as needed.\n gridData$Dates$start <- as.character(Date)\n \n # Assing data to grid data\n # At leaset should be 3 dimensions, lon, lat, time. So if less than 3, \n \n gridData$Data <- nc_data\n \n attributes(gridData$Data)$dimensions <- dimNames\n \n if (!is.na(dimIndex[4])) gridData$Members <- var$dim[[dimIndex[4]]]$vals\n \n gridData$Loaded <- 'by hyfo package, http://yuanchao-xu.github.io/hyfo/'\n nc_close(nc)\n \n output <- downscaleNcdf(gridData, ...)\n \n return(output)\n \n}\n\n\n\n\n#' Downscale NetCDF file\n#' @param gridData A hyfo list file from \\code{\\link{loadNcdf}}\n#' @param year A vector of the target year. e.g. \\code{year = 2000}, \\code{year = 1980:2000}\n#' @param month A vector of the target month. e.g. \\code{month = 2}, \\code{month = 3:12}\n#' @param lon A vector of the range of the downscaled longitude, should contain a max value\n#' and a min value. e.g. \\code{lon = c(-1.5, 2,5)}\n#' @param lat A vector of the range of the downscaled latitude, should contain a max value\n#' and a min value. e.g. \\code{lat = c(32,2, 36)}\n#' @return A downscaled hyfo list file.\n#' @examples \n#' # First open the test NETcDF file.\n#' filePath <- system.file(\"extdata\", \"tnc.nc\", package = \"hyfo\")\n#' \n#' \n#' # Then if you don't know the variable name, you can use \\code{getNcdfVar} to get variable name\n#' varname <- getNcdfVar(filePath)\n#' \n#' nc <- loadNcdf(filePath, varname)\n#' \n#' # Then write to your work directory\n#' \n#' nc1 <- downscaleNcdf(nc, year = 2006, lon = c(-2, -0.5), lat = c(43.2, 43.7))\n#' nc2 <- downscaleNcdf(nc, year = 2005, month = 3:8, lon = c(-2, -0.5), lat = c(43.2, 43.7))\n#' \n#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/\n#' \n#' @export \n#' @references \n#' \n#' \\itemize{\n#' \n#' \\item Santander MetGroup (2015). ecomsUDG.Raccess: R interface to the ECOMS User Data Gateway. R package\n#' version 2.2-6. 
http://meteo.unican.es/ecoms-udg\n#' }\n#' \n#' \ndownscaleNcdf <- function(gridData, year = NULL, month = NULL, lon = NULL, lat = NULL) {\n \n \n if (!is.null(year)) {\n Dates <- as.POSIXlt(gridData$Dates$start)\n yearIndex <- Dates$year + 1900\n monIndex <- Dates$mon + 1\n timeDim <- match('time', attributes(gridData$Data)$dimensions)\n \n \n if (is.null(month) || !any(sort(month) != month)) {\n targetYearIndex <- which(yearIndex %in% year)\n if (length(targetYearIndex) == 0) stop('No input years in the input ts, check your input.')\n \n \n # if year crossing than sort(month) != month\n } else {\n \n startIndex <- intersect(which(yearIndex == year[1] - 1), which(monIndex == month[1]))[1]\n endIndex <- tail(intersect(which(yearIndex == tail(year, 1)), which(monIndex == tail(month, 1))), 1)\n \n if (is.na(startIndex) || length(endIndex) == 0 || startIndex > endIndex) {\n stop('Cannot find input months and input years in the input time series.')\n } else {\n \n targetYearIndex <- startIndex:endIndex\n \n if (any(diff(year) != 1)) {\n # if year is not continuous, like 1999, 2003, 2005, than we have to sift again.\n # Only for special cases.\n Dates <- Dates[targetYearIndex]\n yea <- Dates$year + 1900\n mon <- Dates$mon + 1\n \n DateIndex <- unlist(sapply(year, function(x) {\n startIndex <- intersect(which(yea == x - 1), which(mon == month[1]))[1]\n endIndex <- tail(intersect(which(yea == x), which(mon == tail(month, 1))), 1)\n index <- startIndex:endIndex\n return(index)\n }))\n \n \n targetYearIndex <- targetYearIndex[DateIndex]\n # cannot directly return output here, because sometimes, month can be incontinuous,\n # we still need the next process to sift month.\n }\n }\n }\n \n gridData$Dates$start <- gridData$Dates$start[targetYearIndex]\n gridData$Dates$end <- gridData$Dates$end[targetYearIndex]\n \n gridData$Data <- chooseDim(gridData$Data, timeDim, targetYearIndex)\n } \n \n if (!is.null(month)) {\n Dates <- as.POSIXlt(gridData$Dates$start)\n monIndex <- 
Dates$mon + 1\n \n targetMonIndex <- which(monIndex %in% month)\n if (length(targetMonIndex) == 0) stop('Check your input year, it may exceed the years \n in the input dataset.')\n gridData$Dates$start <- gridData$Dates$start[targetMonIndex]\n gridData$Dates$end <- gridData$Dates$end[targetMonIndex]\n \n timeDim <- match('time', attributes(gridData$Data)$dimensions)\n \n gridData$Data <- chooseDim(gridData$Data, timeDim, targetMonIndex)\n \n }\n \n if (!is.null(lon)) {\n \n lonIndex <- gridData$xyCoords$x\n \n lonI1 <- which(abs(lonIndex - min(lon)) == min(abs(lonIndex - min(lon)), na.rm = TRUE)) \n lonI2 <- which(abs(lonIndex - max(lon)) == min(abs(lonIndex - max(lon)), na.rm = TRUE)) \n \n # take the as large as possible range\n targetLonIndex <- lonI1[length(lonI1)]:lonI2[length(lonI2)]\n if (length(targetLonIndex) == 0) stop('Your input lon is too small, try to expand the \n longitude range.') \n gridData$xyCoords$x <- gridData$xyCoords$x[targetLonIndex]\n lonDim <- grepAndMatch('lon', attributes(gridData$Data)$dimensions)\n \n gridData$Data <- chooseDim(gridData$Data, lonDim, targetLonIndex)\n }\n \n \n if (!is.null(lat)) {\n latIndex <- gridData$xyCoords$y\n \n latI1 <- which(abs(latIndex - min(lat)) == min(abs(latIndex - min(lat)), na.rm = TRUE)) \n latI2 <- which(abs(latIndex - max(lat)) == min(abs(latIndex - max(lat)), na.rm = TRUE)) \n \n targetLatIndex <- latI1[length(latI1)]:latI2[length(latI2)]\n \n if (length(targetLonIndex) == 0) stop('Your input lat is too small, try to expand the \n latitude range.') \n gridData$xyCoords$y <- gridData$xyCoords$y[targetLatIndex]\n latDim <- grepAndMatch('lat', attributes(gridData$Data)$dimensions)\n gridData$Data <- chooseDim(gridData$Data, latDim, targetLatIndex)\n }\n \n return(gridData)\n \n}\n\n\n\n\n\n\n\n\n\n\n#' Write to NetCDF file using hyfo list file\n#' @param gridData A hyfo list file from \\code{\\link{loadNcdf}}\n#' @param filePath A path of the new NetCDF file, should end with \".nc\"\n#' @param 
missingValue A number representing the missing value in the NetCDF file, default\n#' is 1e20\n#' #' @param tz A string representing the time zone, default is GMT, if you know what time zone is \n#' you can assign it in the argument. If \\code{tz = ''}, current time zone will be taken.\n#' @param units A string showing in which unit you are putting in the NetCDF file, it can be \n#' seconds or days and so on. If not specified, the function will pick up the possible largest \n#' time units from \\code{c('weeks', 'days', 'hours', 'mins', 'secs')}\n#' @param version ncdf file versions, default is 3, if 4 is chosen, output file will be foreced to version 4.\n#' @return An NetCDF version 3 file.\n#' @examples \n#' # First open the test NETcDF file.\n#' filePath <- system.file(\"extdata\", \"tnc.nc\", package = \"hyfo\")\n#' \n#' \n#' # Then if you don't know the variable name, you can use \\code{getNcdfVar} to get variable name\n#' varname <- getNcdfVar(filePath)\n#' \n#' nc <- loadNcdf(filePath, varname)\n#' \n#' # Then write to your work directory\n#' \n#' writeNcdf(nc, 'test.nc')\n#' \n#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/\n#' \n#' @export \n#' @import ncdf4\n#' @references \n#' \n#' \\itemize{\n#' \\item David Pierce (2015). ncdf4: Interface to Unidata netCDF (Version 4 or\n#' Earlier) Format Data Files. R package version 1.14.1.\n#' http://CRAN.R-project.org/package=ncdf4\n#' \n#' \\item Santander MetGroup (2015). ecomsUDG.Raccess: R interface to the ECOMS User Data Gateway. R package\n#' version 2.2-6. 
http://meteo.unican.es/ecoms-udg\n#' \n#' }\n#' \n#' \nwriteNcdf <- function(gridData, filePath, missingValue = 1e20, tz = 'GMT', units = NULL, version = 3) {\n \n name <- gridData$Variable$varName\n # First defines dimensions.\n lonName <- attributes(gridData$xyCoords$x)$name\n latName <- attributes(gridData$xyCoords$y)$name\n dimLon <- ncdim_def(lonName, 'degree', gridData$xyCoords$x)\n dimLat <- ncdim_def(latName, 'degree', gridData$xyCoords$y)\n dimMem <- NULL\n if (!is.null(gridData$Members)) {\n dimMem <- ncdim_def('member', 'members', 1:length(gridData$Members))\n }\n \n \n # Time needs to be treated seperately\n dates <- as.POSIXlt(gridData$Dates$start, tz = tz)\n if (is.null(units)) {\n units <- getTimeUnit(dates)\n time <- difftime(dates, dates[1], units = units)\n } else {\n time <- difftime(dates, dates[1], units = units)\n }\n timeUnits <- paste(units, 'since', dates[1])\n # Here time needs to be numeric, as required by ncdf4 package, which is not the same\n # with ncdf\n dimTime <- ncdim_def('time', timeUnits, as.numeric(time))\n \n \n # Depending on whether there is a member part of the dataset.\n # default list\n dimList <- list(dimLon, dimLat, dimTime, dimMem)\n \n # In order to keep the dim list exactly the same with the original one, it needs to be changed.\n dimIndex <- grepAndMatch(c('lon', 'lat', 'time', 'member'), attributes(gridData$Data)$dimensions)\n dimIndex <- na.omit(dimIndex)\n \n # Here order is needed, cuz in the procesure above, c('lon', 'lat', 'time', 'member')\n # is the pattern, while actually, attributes(gridData$Data)$dimensions should be the pattern.\n # So here needs an order() to get the wanted result.\n dimList <- dimList[order(dimIndex)]\n \n # delete the NULL list, in order that there is no member part in the data.\n dimList <- Filter(Negate(is.null), dimList)\n # Then difines data\n var <- ncvar_def( name, \"units\", dimList, missingValue)\n \n \n # Here for ncdf4, there is an option to create version 4 ncdf, in future, 
it\n # may added here.\n if (version == 3) {\n nc <- nc_create(filePath, var) \n } else if (version == 4) {\n nc <- nc_create(filePath, var, force_v4 = TRUE)\n } else {\n stop(\"Which ncdf version you want? Only 3 and 4 can be selected!\")\n }\n \n # This part comes from the library downscaleR, can be deleted if you don't \n # use {ecomsUDG.Raccess}, by adding this, the file can be read by the package {ecomsUDG.Raccess}\n ncatt_put(nc, \"time\", \"standard_name\",\"time\")\n ncatt_put(nc, \"time\", \"axis\",\"T\")\n ncatt_put(nc, \"time\", \"_CoordinateAxisType\",\"Time\")\n #ncatt_put(nc, \"time\", \"_ChunkSize\",1)\n ncatt_put(nc, lonName, \"standard_name\",\"longitude\")\n ncatt_put(nc, lonName, \"_CoordinateAxisType\",\"Lon\")\n ncatt_put(nc, latName, \"standard_name\",\"latitude\")\n ncatt_put(nc, latName, \"_CoordinateAxisType\",\"Lat\")\n if (!is.null(dimMem)){\n ncatt_put(nc, \"member\", \"standard_name\",\"realization\")\n ncatt_put(nc, \"member\", \"_CoordinateAxisType\",\"Ensemble\")\n #att.put.ncdf(nc, \"member\", \"ref\",\"http://www.uncertml.org/samples/realisation\")\n }\n \n \n # This part has to be put\n ncatt_put(nc, 0, \"Conventions\",\"CF-1.4\")\n ncatt_put(nc, 0, 'WrittenBy', 'hyfo(http://yuanchao-xu.github.io/hyfo/)')\n \n #data <- aperm(gridData$Data, dimIndex) no need to do this, in the process above\n # when you define the dimlist, the order of the dimension was fixed.\n data <- gridData$Data\n ncvar_put(nc, name, data)\n nc_close(nc)\n \n}\n\n# For internaluse by writeNcdf\ngetTimeUnit <- function(dates) {\n units <- c('weeks', 'days', 'hours', 'mins', 'secs')\n output <- NULL\n for (unit in units) {\n time <- difftime(dates, dates[1], units = unit)\n rem <- sapply(time, function(x) x%%1)\n if (!any(rem != 0)) {\n output <- unit\n break\n }\n } \n return(output)\n}\n\n\n# Save for future use. \n#' @import ncdf4\n#' @references \n#' David Pierce (2014). ncdf: Interface to Unidata netCDF data files. 
R package version 1.6.8.\n#' http://CRAN.R-project.org/package=ncdf\ngetExtralDim <- function(...) {\n dimList <- list(...)\n \n \n}\n\n# in order to first grep than match.\n# match only provides for exactly match, \n# dimIndex <- grepAndMatch(c('lon', 'lat', 'time', 'member'), dimNames)\ngrepAndMatch <- function(x, table) {\n index <- unlist(lapply(x, function(x) {\n a <- grep(x, table)\n }))\n return(index)\n}", - "created" : 1449680737769.000, - "dirty" : false, - "encoding" : "ASCII", - "folds" : "", - "hash" : "3683291925", - "id" : "39BBA10B", - "lastKnownWriteTime" : 1450178194, - "path" : "E:/1/R/hyfo/R/ncdf.R", - "project_path" : "R/ncdf.R", - "properties" : { - }, - "relative_order" : 12, - "source_on_save" : false, - "type" : "r_source" -} \ No newline at end of file diff --git a/.Rproj.user/D53FD3E6/sdb/per/t/46C049C3 b/.Rproj.user/D53FD3E6/sdb/per/t/46C049C3 deleted file mode 100644 index 8901087..0000000 --- a/.Rproj.user/D53FD3E6/sdb/per/t/46C049C3 +++ /dev/null @@ -1,17 +0,0 @@ -{ - "contents" : "---\ntitle: '[hyfo Easy Start](http://yuanchao-xu.github.io/hyfo/)'\nauthor: '[Yuanchao Xu](https://dk.linkedin.com/in/xuyuanchao37)'\ndate: '`r Sys.Date()`'\noutput: \n pdf_document:\n toc: yes\n toc_depth: 3\n html_document:\n toc: yes\nvignette: > \n %\\VignetteIndexEntry{hyfo easy start} \n %\\VignetteEngine{knitr::rmarkdown}\n %\\VignetteEncoding{ASCII}\n---\n\n# Introduction\n\n**Official Website is [http://yuanchao-xu.github.io/hyfo](http://yuanchao-xu.github.io/hyfo), where manuals and more details can be found.**\n\nhyfo is an R package, initially designed for the European Project EUPORIAS, and cooperated with DHI Denmark, which was then extended to other uses in hydrology, hydraulics and climate.\n\nThis package mainly focuses on data process and visulization in hydrology and climate forecasting. 
Main function includes NetCDF file processing, data extraction, data downscaling, data resampling, gap filler of precipitation, bias correction of forecasting data, flexible time series plot, and spatial map generation. It is a good pre-processing and post-processing tool for hydrological and hydraulic modellers.\n\n**If you feel hyfo is of a little help, please cite it as following:**\n\nXu, Yuanchao(2015). hyfo: Hydrology and Climate Forecasting R Package for Data Analysis and Visualization. Retrieved from http://yuanchao-xu.github.io/hyfo/\n\n[Author in this corner](https://dk.linkedin.com/in/xuyuanchao37)\n\n#### TIPS\n* For the hydrology tools part, the minimum time unit is a day, i.e., it mainly focuses on water resource and some long term analysis. For flood analysis part, it will be added in future.\n\n\n* One important characteristic by which hyfo can be distinguished from others is its convenience in multiple plots and series plots. Most data visualization tool in hyfo provides the output that can be directly re-plot by `ggplot2`, if `output = 'ggplot'` is assigned in the argument of the function, which will be easier for the users to generated series/multiple plots afterwards. When `output = 'ggplot'` is selected, you also have to assigne a `name = 'yourname'` in the argument, for the convenience of generating multiplots in future. All the functions ending with `_comb` can generated series/multiple plots, details can be found in the user mannual. \n\n\n* For the forecasting tools part, `hyfo` mainly focuses on the post processing of the gridData derived from forecasts or other sources. The input is a list file, usually an NetCDF file. There are `getNcdfVar()`, `loadNcdf()` and `writeNcdf()` prepared in hyfo, for you to deal with NetCDF file. \n\n* If you don't like the tile, x axis, y axis of the plot, just set them as '', e.g. `title = ''`\n\n* For R beginners, R provides different functions to write to file. 
`write.table` is a popular choice, and after write the results to a file, you can directly copy paste to your model or to other uses.\n\n* The functions end with `_anarbe` are the functions designed specially for some case in Spain, those functions mostly are about data collection of the anarbe catchment, which will be introduced in the end of this mannual.\n", - "created" : 1443834320384.000, - "dirty" : false, - "encoding" : "ASCII", - "folds" : "", - "hash" : "2573223757", - "id" : "46C049C3", - "lastKnownWriteTime" : 1449243907, - "path" : "E:/1/R/hyfo/vignettes/hyfo.Rmd", - "project_path" : "vignettes/hyfo.Rmd", - "properties" : { - }, - "relative_order" : 1, - "source_on_save" : false, - "type" : "r_markdown" -} \ No newline at end of file diff --git a/.Rproj.user/D53FD3E6/sdb/per/t/5D765D44 b/.Rproj.user/D53FD3E6/sdb/per/t/5D765D44 deleted file mode 100644 index 1dd1b6a..0000000 --- a/.Rproj.user/D53FD3E6/sdb/per/t/5D765D44 +++ /dev/null @@ -1,18 +0,0 @@ -{ - "contents" : "#' Resample your time series or ncdf files.\n#' \n#' Resameple your time series or ncdf files, more info pleae see details.\n#' \n#' \n#' @param data a hyfo grid data or a time series, with first column date, and second column value. The date column should\n#' follow the format in \\code{as.Date}, i.e. seperate with \"-\" or \"/\". Check details for more information.\n#' @param method A string showing whether you want to change a daily data to monthly data or monthly\n#' data to daily data.e.g. \"mon2day\" and \"day2mon\".\n#' @details \n#' Note, when you want to change daily data to monthly data, a new date column will be generated,\n#' usually the date column will be the middle date of each month, 15th, or 16th. However, if your \n#' time series doesn't start from the beginning of a month or ends to the end of a month, e.g. \n#' from 1999-3-14 to 2008-2-2, the first and last generated date could be wrong. 
Not only the date, but also the data, because you are \n#' not calculating based on a intact month. \n#' \n#' It is a generic function, if in your case you need to debug, please see \\code{?debug()} \n#' for how to debug S4 method.\n#' \n#' @return converted time series.\n#' @examples\n#' # Daily to monthly\n#' data(testdl)\n#' TS <- testdl[[2]] # Get daily data\n#' str(TS)\n#' TS_new <- resample(TS, method = 'day2mon')\n#' \n#' # Monthly to daily\n#' TS <- data.frame(Date = seq(as.Date('1999-9-15'), length = 30, by = '1 month'), \n#' runif(30, 3, 10))\n#' TS_new <- resample(TS, method = 'mon2day')\n#' \n#' #' # First load ncdf file.\n#' filePath <- system.file(\"extdata\", \"tnc.nc\", package = \"hyfo\")\n#' varname <- getNcdfVar(filePath) \n#' nc <- loadNcdf(filePath, varname)\n#' \n#' nc_new <- resample(nc, 'day2mon')\n#' \n#' \n#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/\n#' \n#' @export\n#' @importFrom stats aggregate\n#' @references \n#' \n#' \\itemize{\n#' \\item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for\n#' Statistical Computing, Vienna, Austria. 
URL http://www.R-project.org/.\n#' }\n#' \nsetGeneric('resample', function(data, method) {\n standardGeneric('resample')\n})\n\n\n#' @describeIn resample\nsetMethod('resample', signature('data.frame'),\n function(data, method) {\n result <- resample.TS(data, method)\n return(result)\n })\n\n#' @describeIn resample\nsetMethod('resample', signature('list'),\n function(data, method) {\n result <- resample.list(data, method)\n return(result)\n })\n\n\n\n#' @importFrom stats aggregate\nresample.TS <- function(TS, method) {\n if (length(TS) != 2) {\n stop('Time series not correct, should be two columns, Date and value.')\n } else if (!grepl('-|/', TS[1, 1])) {\n stop('First column is not date or Wrong Date formate, check the format in ?as.Date{base} \n and use as.Date to convert.')\n } \n \n \n if (method == 'mon2day') {\n \n data <- apply(TS, MARGIN = 1 , FUN = mon2day)\n \n output <- do.call('rbind', data)\n } else if (method == 'day2mon') {\n Date <- as.Date(TS[, 1])\n year <- format(Date, format = '%Y')\n mon <- format(Date, format = '%m')\n \n data <- aggregate(TS, by = list(mon, year), FUN = mean, na.rm = TRUE)[, 3:4]\n rownames(data) <- 1:dim(data)[1]\n output <- data\n } else {\n stop('method is not correct, check method argument.')\n }\n \n return (output)\n}\n\n#' @importFrom stats aggregate\nresample.list <- function(hyfo, method) {\n checkHyfo(hyfo)\n hyfoData <- hyfo$Data\n Date <- as.POSIXlt(hyfo$Dates$start)\n year <- Date$year + 1900\n mon <- Date$mon + 1\n # hyfoDim <- attributes(hyfoData)$dimensions\n # resample focuses on time dimension. 
No matter whether the member dimension exists.\n timeIndex <- match('time', attributes(hyfoData)$dimensions)\n dimArray <- 1:length(attributes(hyfoData)$dimensions)\n \n if (method == 'day2mon') {\n hyfoData <- apply(hyfoData, MARGIN = dimArray[-timeIndex], \n function(x) aggregate(x, by = list(mon, year), FUN = mean, na.rm = TRUE)[, 3])\n Date <- aggregate(Date, by = list(mon, year), FUN = mean, na.rm = TRUE)[, 3]\n } else if (method == 'mon2day') {\n message('Under development.')\n }\n \n hyfo$Dates$start <- Date\n hyfo$Data <- hyfoData\n return(hyfo)\n}\n\n\n\n\n#' @importFrom utils tail\n#' @references \n#' \n#' \\itemize{\n#' \\item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for\n#' Statistical Computing, Vienna, Austria. URL http://www.R-project.org/.\n#' }\n#' \nmon2day <- function(monData) {\n Date <- as.Date(monData[1])\n data <- monData[2]\n \n DateY <- format(Date, format = '%Y')\n DateM <- format(Date, format = '%m')\n DateL <- seq(Date, length = 2, by = '1 months')[2] - Date\n \n DateD <- 1:DateL\n \n start <- as.Date(paste(DateY, DateM, DateD[1], sep = '-'))\n end <- as.Date(paste(DateY, DateM, tail(DateD, 1), sep = '-'))\n \n Date <- seq(start, end, by = '1 day')\n \n dailyData <- data.frame(Date = Date, value = rep(data, DateL))\n \n return(dailyData)\n}", - "created" : 1449960005295.000, - "dirty" : false, - "encoding" : "ASCII", - "folds" : "", - "hash" : "3185161121", - "id" : "5D765D44", - "lastKnownWriteTime" : 1446994437, - "path" : "E:/1/R/hyfo/R/resample(generic).R", - "project_path" : "R/resample(generic).R", - "properties" : { - "tempName" : "Untitled1" - }, - "relative_order" : 15, - "source_on_save" : false, - "type" : "r_source" -} \ No newline at end of file diff --git a/.Rproj.user/D53FD3E6/sdb/per/t/62C82440 b/.Rproj.user/D53FD3E6/sdb/per/t/62C82440 new file mode 100644 index 0000000..b192179 --- /dev/null +++ b/.Rproj.user/D53FD3E6/sdb/per/t/62C82440 @@ -0,0 +1,17 @@ +{ + "contents" : 
"Package: hyfo\nType: Package\nTitle: Hydrology and Climate Forecasting\nVersion: 1.3.7\nDate: 2016-3-21\nAuthors@R: person(\"Yuanchao\", \"Xu\", email = \"xuyuanchao37@gmail.com\",\n role = c(\"aut\", \"cre\"))\nDescription: Focuses on data processing and visualization in hydrology and\n climate forecasting. Main function includes data extraction, data downscaling, data \n resampling, gap filler of precipitation, bias correction of forecasting data, flexible\n time series plot, and spatial map generation. It is a good pre-processing and \n post-processing tool for hydrological and hydraulic modellers.\nLicense: GPL-2\nDepends: \n R (>= 3.1.0),\n stats (>= 3.1.3),\n utils(>= 3.1.3),\nImports: \n ggplot2 (>= 1.0.1),\n reshape2 (>= 1.4.1),\n zoo (>= 1.7-12),\n rgdal (>= 0.8-16),\n plyr (>= 1.8.3),\n moments (>= 0.14),\n lmom (>= 2.5),\n maps(>= 2.3-9),\n maptools (>= 0.8-36),\n rgeos (>= 0.3-8),\n ncdf4 (>= 1.14.1),\n MASS (>= 7.3-39),\n methods\nSuggests: \n gridExtra,\n knitr,\n rmarkdown\nVignetteBuilder: knitr\nLazyData: true\nURL: http://yuanchao-xu.github.io/hyfo/\nBugReports: https://github.com/Yuanchao-Xu/hyfo/issues\nRepository: CRAN\n", + "created" : 1458576791467.000, + "dirty" : false, + "encoding" : "ASCII", + "folds" : "", + "hash" : "355092708", + "id" : "62C82440", + "lastKnownWriteTime" : 1458576809, + "path" : "E:/1/R/hyfo/DESCRIPTION", + "project_path" : "DESCRIPTION", + "properties" : { + }, + "relative_order" : 1, + "source_on_save" : false, + "type" : "dcf" +} \ No newline at end of file diff --git a/.Rproj.user/D53FD3E6/sdb/per/t/6A0664B3 b/.Rproj.user/D53FD3E6/sdb/per/t/6A0664B3 deleted file mode 100644 index 5db9bf5..0000000 --- a/.Rproj.user/D53FD3E6/sdb/per/t/6A0664B3 +++ /dev/null @@ -1,17 +0,0 @@ -{ - "contents" : "#' Get mean rainfall data.\n#' \n#' Get mean rainfall data, e.g. 
mean annual rainfall, mean monthly rainfall and mean winter rainfall.\n#' \n#' @param inputTS A time series with only data column (1 column).\n#' @param method A string showing the method used to calculate mean value, e.g., \"annual\".\n#' more information please refer to details.\n#' @param yearIndex A NUMERIC ARRAY showing the year index of the time series.\n#' @param monthIndex A NUMERIC ARRAY showing the month index of the time series.\n#' @param fullResults A boolean showing whether the full results are shown, default is FALSE. If \n#' FALSE, only mean value will be returned, if TRUE, the sequence of values will be returned.\n#' @param omitNA A boolean showing in the calculation, whether NA is omitted, default is FALSE.\n#' @param plot A boolean showing whether the results will be plotted.\n#' @param ..., \\code{title, x, y} showing the title and x and y axis of the plot, shoud be a string.\n#' @details\n#' There are following methods to be selected, \n#' \"annual\": annual rainfall of each year is plotted. \n#' \"winter\", \"spring\", \"autumn\", \"summer\": seasonal rainfall of each year is plotted.\n#' Month(number 1 to 12): month rainfall of each year is plotted, e.g. 
march rainfall of each year.\n#' \"meanMonthly\": the mean monthly rainfall of each month over the whole period.\n#' \n#' Since \"winter\" is a crossing year, 12, 1, 2, 12 is in former year, and 1, 2 are in latter year.\n#' so winter belongs to the latter year.\n#' \n#' @return The mean value of the input time series or the full results before calculating mean.\n#' \n# data(testdl)\n# TS <- testdl[[1]]\n# year = as.numeric(format(TS[, 1], '%Y'))\n# month = as.numeric(format(TS[, 1], '%m'))\n# \n# # Get the mean spring precipitation.\n# a <- getMeanPreci(TS[, 2], method = 'spring', yearIndex = year, monthIndex = month)\n# a\n# \n# # Get the series of spring precipitation, set fullResults = TRUE.\n# a <- getMeanPreci(TS[, 2], method = 'spring', yearIndex = year, monthIndex = month,\n# fullResults = TRUE)\n# a\n# \n# # If missing value is excluded, set omitNA = TRUE.\n# a <- getMeanPreci(TS[, 2], method = 'winter', yearIndex = year, monthIndex = month,\n# omitNA = TRUE, fullResults = TRUE)\n# a\n# \n# # Get special month precipitation, e.g. march.\n# a <- getMeanPreci(TS[, 2], method = 3, yearIndex = year, monthIndex = month,\n# fullResults = TRUE)\n# a\n# \n# # We can also get annual precipitation.\n# a <- getMeanPreci(TS[, 2], method = 'annual', yearIndex = year, monthIndex = month,\n# fullResults = TRUE)\n\ngetMeanPreci <- function(inputTS, method = NULL, yearIndex = NULL, monthIndex = NULL,\n fullResults = FALSE, omitNA = TRUE, plot = FALSE, ...) 
{\n # First check if all the records are NA.\n if (any(!is.na(inputTS))) {\n #converting daily preci to the wanted preci.\n if (method == 'annual') {\n ###yearIndex <- startTime$year + 1900\n annualPreci <- tapply(inputTS, INDEX = yearIndex, FUN = sum, na.rm = omitNA)#ggplot is able not to show NA, so choose TRUE\n if (fullResults == TRUE) output <- annualPreci else output <- mean(annualPreci, na.rm = TRUE)\n \n } else if (method == 'meanMonthly') {\n \n monthlypreci <- tapply(inputTS, INDEX = list(yearIndex, monthIndex), FUN = sum, na.rm = omitNA)\n meanMonthlyPreci <- apply(monthlypreci, MARGIN = 2, FUN = mean, na.rm = TRUE)\n \n if (fullResults == TRUE) output <- meanMonthlyPreci else output <- mean(meanMonthlyPreci, na.rm = TRUE)\n \n }else if (method == 'winter') {\n# #winter is the most tricky part, because it starts from Dec to Feb next year, it's a year-crossing season,\n# #so we have to make some changes to the monthIndex\n# #e.g.data from 1950.1.1 - 2008.3.31 if we want to calculate the mean winter preci, to calculate winter month\n# #December, we have to move the yearIndex one month forwards or two months backwards, to make 12,1,2 in one year \n# ###yearIndex <- startTime$year + 1900\n# ###monthIndex <- startTime$mon + 1\n# \n# #we move the yearIndex one month backwards\n# yearIndex_new <- c(yearIndex[32:length(yearIndex)], rep(tail(yearIndex, 1), 31))\n# \n# winterIndex <- which(monthIndex == 12 | monthIndex == 1 | monthIndex == 2)\n# winterYear <- yearIndex_new[winterIndex]#this index is used for calculation\n# \n# #because we don't have 1949.Dec, so the first winter is not intact, so first two months are elemenated\n# \n# startIndex <- length(which(winterYear == yearIndex[1])) + 1\n# winterOfLastYear <- length(which(winterYear == tail(yearIndex, 1)))\n# if (winterOfLastYear > 91) {\n# endIndex <- length(winterYear) - 31 #in case the data set ends at Dec.31\n# \n# } else if (winterOfLastYear < 90) { # incase the data ends at Jan 31\n# endIndex <- 
length(winterYear) - length(which(winterYear == tail(yearIndex, 1)))\n# \n# } else {\n# endIndex <- length(winterYear)\n# }\n# \n# inputTS <- inputTS[winterIndex][startIndex:endIndex]#needs two process with inputPreci, first, extract\n# #the winter preci, second, delete first two month of 1950\n# \n# winterYear <- winterYear[startIndex:endIndex]#needs one process, delete two months \n# seasonalPreci <- tapply(inputTS,INDEX = winterYear, FUN = sum, na.rm = omitNA)\n \n # upper part is the older method saved as backup.\n \n matrix <- tapply(inputTS, INDEX = list(monthIndex, yearIndex), FUN = sum, na.rm = omitNA)\n col <- colnames(matrix)\n dec <- matrix['12',] # extract December.\n dec <- c(NA, dec[1:length(dec) - 1]) # rearrange December order to push it to next year.\n names(dec) <- col\n matrix <- rbind(dec, matrix[rownames(matrix) != '12', ]) \n seasonalPreci <- apply(matrix, MARGIN = 2, function(x) sum(x[c('dec', '1', '2')]))\n \n if (fullResults == TRUE) output <- seasonalPreci else output <- mean(seasonalPreci, na.rm = TRUE) \n \n } else if (method == 'spring') {\n \n# springIndex <- which(monthIndex == 3 | monthIndex == 4 | monthIndex == 5)\n# springYear <- yearIndex[springIndex]\n# inputTS <- inputTS[springIndex]\n# seasonalPreci <- tapply(inputTS, INDEX = springYear, FUN = sum, na.rm = omitNA)\n \n \n matrix <- tapply(inputTS, INDEX = list(monthIndex, yearIndex), FUN = sum, na.rm = omitNA)\n seasonalPreci <- apply(matrix, MARGIN = 2, function(x) sum(x[c('3', '4', '5')]))\n \n if (fullResults == TRUE) output <- seasonalPreci else output <- mean(seasonalPreci, na.rm = TRUE)\n \n } else if (method == 'summer') {\n \n matrix <- tapply(inputTS, INDEX = list(monthIndex, yearIndex), FUN = sum, na.rm = omitNA)\n seasonalPreci <- apply(matrix, MARGIN = 2, function(x) sum(x[c('6', '7', '8')]))\n \n if (fullResults == TRUE) output <- seasonalPreci else output <- mean(seasonalPreci, na.rm = TRUE)\n \n } else if (method == 'autumn') {\n \n matrix <- tapply(inputTS, 
INDEX = list(monthIndex, yearIndex), FUN = sum, na.rm = omitNA)\n seasonalPreci <- apply(matrix, MARGIN = 2, function(x) sum(x[c('9', '10', '11')]))\n\n if (fullResults == TRUE) output <- seasonalPreci else output <- mean(seasonalPreci, na.rm = TRUE)\n \n } else if (is.numeric(method)) {\n \n month <- method\n \n #check if month exist \n e <- match(month, unique(monthIndex))\n if (is.na(e)) {\n e1 <- paste(unique(monthIndex), collapse = ',')\n m <- paste('No input month exists in the dataset, choose among', e1)\n stop(m)\n }\n \n monthlyPreci <- tapply(inputTS, INDEX = list(yearIndex, monthIndex), \n FUN = sum, na.rm = omitNA)[, toString(month)]\n \n if (fullResults == TRUE) output <- monthlyPreci else output <- mean(monthlyPreci, na.rm = TRUE)\n }\n \n } else {\n output <- NA\n }\n\n if (plot == TRUE) {\n a <- data.frame(Date = names(output), value = output)\n \n theme_set(theme_bw())\n mainLayer <- with(a, {\n ggplot(a) +\n geom_bar(aes(x = Date, y = value), stat = 'identity', fill = 'cyan') +\n labs(empty = NULL, ...) 
+#in order to pass \"...\", arguments shouldn't be empty.\n theme(plot.title = element_text(size = rel(1.3), face = 'bold'),\n axis.title.x = element_text(size = rel(1.2)),\n axis.title.y = element_text(size = rel(1.2))) + \n theme(axis.text.x = element_text(angle = 90, hjust = 1))\n })\n \n print (mainLayer)\n }\n \n return(output)\n}\n", - "created" : 1449959878337.000, - "dirty" : false, - "encoding" : "ASCII", - "folds" : "", - "hash" : "1620316423", - "id" : "6A0664B3", - "lastKnownWriteTime" : 1446467132, - "path" : "E:/1/R/hyfo/R/getMeanPreci.R", - "project_path" : "R/getMeanPreci.R", - "properties" : { - }, - "relative_order" : 13, - "source_on_save" : false, - "type" : "r_source" -} \ No newline at end of file diff --git a/.Rproj.user/D53FD3E6/sdb/per/t/805EE7EC b/.Rproj.user/D53FD3E6/sdb/per/t/805EE7EC deleted file mode 100644 index b1e709c..0000000 --- a/.Rproj.user/D53FD3E6/sdb/per/t/805EE7EC +++ /dev/null @@ -1,17 +0,0 @@ -{ - "contents" : "#' Check data for bind function.\n#' \n#' check if the data is available for \\code{rbind()} or \\code{cbind()}\n#' \n#' @param data A list containing different sublists ready to be processed by \\code{do.call('rbind')} \n#' or \\code{do.call('cbind')}\n#' @param bind A string showing which bind you are going to use can be 'rbind' or 'cbind'\n#' @return data can be processed by bind function; data cannot be processed by bind function\n#' @examples\n#' data <- list(c(1,1,1),c(2,2,2))\n#' bind <- 'rbind'\n#' checkBind(data,bind)\n#' \n#' data(testdl)\n#' \\dontrun{\n#' checkBind(testdl, 'rbind')\n#' }\n#' # Since the colnames in testdl are not the same, so it cannot be bound.\n#' #\n#' @export\ncheckBind <- function(data, bind){\n # data has to be a list of values, and will be used in do.call('rbind')\n message ('Check if the data list is available for rbind or cbind... 
\\n')\n if (bind == 'rbind') {\n colNum <- sapply(data, function(x) dim(x)[2])\n colLev <- unique(colNum)\n if (length(colLev) != 1) {\n dif <- colLev[2]\n difNum <- which(colNum == dif)\n stop(sprintf('Different Colomn number in %s th of the input list \\n', difNum))\n \n }\n \n # For rbind, colnames has to be checked as well.\n colNameNum <- lapply(data, function(x) colnames(x))\n sameName <- sapply(1:length(colNameNum), function(x) colNameNum[[x]] == colNameNum[[1]])\n if (any(!is.null(unlist(colNameNum))) & (any(sameName == FALSE) | any(length(unlist(sameName)) == 0))) {\n stop('Data in list have Different colnames, which cannot process rbind. ')\n }\n \n \n }else if (bind =='cbind') {\n rowNum <- sapply(data, function(x) dim(x)[1])\n rowLev <- unique(rowNum)\n if (length(rowLev) != 1) {\n dif <- rowLev[2]\n difNum <- which(rowNum == dif)\n stop(sprintf('Different row number in %s th of the input list \\n', rowNum))\n \n }\n }\n message('Data list is OK')\n}\n\n# Check if a input file is a hyfo grid file.\ncheckHyfo <- function(...) 
{\n datalist <- list(...)\n lapply(datalist, FUN = checkHyfo_core)\n invisible()\n}\n\ncheckHyfo_core <- function(hyfo) {\n #This is to check if the input is a hyfo list.\n checkWord <- c('Data', 'xyCoords', 'Dates')\n if (any(is.na(match(checkWord, attributes(hyfo)$names)))) {\n stop('Input dataset is incorrect, it should contain \"Data\", \"xyCoords\", and \"Dates\",\ncheck help for details or use loadNCDF to read NetCDF file.\n\nIf time series input is needed, and your input is a time series, please put \"TS = yourinput\".')\n }\n}\n\n# This check dim is based on the name of the dimension\ncheckDimLength <- function(..., dim) {\n datalist <- list(...)\n \n for (x in dim) {\n dimLength <- sapply(datalist, function(y) calcuDim(y, x))\n if (any(is.na(dimLength))) stop('No input dimension name, check your dimension name.')\n if (length(unique(dimLength)) != 1) stop('Input data have different dimemsion length.')\n }\n \n invisible()\n}\n\n\n\n\n###########################################################################################\n##### For biasFactor class\n\n##### Validity functions\n\ncheckBiasFactor <- function(object) {\n errors <- character()\n if (length(object@biasFactor) == 0) {\n msg <- 'biasFactors should not be empty.'\n errors <- c(errors, msg)\n }\n \n if (length(object@method) == 0) {\n msg <- 'method should not be empty.'\n errors <- c(errors, msg)\n }\n \n if (length(object@preci) == 0) {\n msg <- 'preci should not be empty.' \n errors <- c(errors, msg)\n }\n \n prThreshold <- object@prThreshold\n if (length(prThreshold) != 0) {\n if (prThreshold < 0) {\n msg <- 'prThreshold should be greater than 0.'\n errors <- c(errors, msg)\n }\n }\n \n scaleType <- object@scaleType\n if (length(scaleType) != 0) {\n if (scaleType != 'multi' & scaleType != 'add') {\n msg <- paste('scaleType is ', scaleType, '. 
Should be \"multi\" or \"add\".', sep = '')\n errors <- c(errors, msg)\n }\n }\n \n extrapolate <- object@extrapolate\n if (length(extrapolate) != 0) {\n if (extrapolate != 'no' & extrapolate != 'constant') {\n msg <- paste('extrapolate is ', extrapolate, '. Should be \"no\" or \"constant\".', sep = '')\n errors <- c(errors, msg)\n }\n }\n \n if (length(errors) == 0) TRUE else errors\n}\n\n\ncheckBiasFactor.hyfo <- function(object) {\n errors <- character()\n length_lonLatDim <- length(object@lonLatDim)\n if (length_lonLatDim != 2) {\n msg <- paste('lonLatDim is length ', length_lonLatDim, '. Should be 2', sep = '')\n errors <- c(errors, msg)\n }\n \n if (length(errors) == 0) TRUE else errors\n}\n\n\n", - "created" : 1449959837684.000, - "dirty" : false, - "encoding" : "ASCII", - "folds" : "", - "hash" : "3565279330", - "id" : "805EE7EC", - "lastKnownWriteTime" : 1446223879, - "path" : "E:/1/R/hyfo/R/check.R", - "project_path" : "R/check.R", - "properties" : { - }, - "relative_order" : 10, - "source_on_save" : false, - "type" : "r_source" -} \ No newline at end of file diff --git a/.Rproj.user/D53FD3E6/sdb/per/t/9CDBC212 b/.Rproj.user/D53FD3E6/sdb/per/t/9CDBC212 deleted file mode 100644 index 77bbbfd..0000000 --- a/.Rproj.user/D53FD3E6/sdb/per/t/9CDBC212 +++ /dev/null @@ -1,17 +0,0 @@ -{ - "contents" : "# Generated by roxygen2 (4.1.1): do not edit by 
hand\n\nexport(applyBiasFactor)\nexport(biasCorrect)\nexport(checkBind)\nexport(collectData)\nexport(collectData_csv_anarbe)\nexport(collectData_excel_anarbe)\nexport(collectData_txt_anarbe)\nexport(downscaleNcdf)\nexport(extractPeriod)\nexport(fillGap)\nexport(getAnnual)\nexport(getBiasFactor)\nexport(getEnsem_comb)\nexport(getFrcEnsem)\nexport(getHisEnsem)\nexport(getLMom)\nexport(getMoment)\nexport(getNcdfVar)\nexport(getPreciBar)\nexport(getPreciBar_comb)\nexport(getSpatialMap)\nexport(getSpatialMap_comb)\nexport(getSpatialMap_mat)\nexport(list2Dataframe)\nexport(loadNcdf)\nexport(plotTS)\nexport(plotTS_comb)\nexport(resample)\nexport(shp2cat)\nexport(writeNcdf)\nexportClasses(biasFactor)\nimport(ggplot2)\nimport(maps)\nimport(maptools)\nimport(ncdf4)\nimport(plyr)\nimport(rgdal)\nimport(rgeos)\nimportFrom(MASS,fitdistr)\nimportFrom(grDevices,rainbow)\nimportFrom(lmom,samlmu)\nimportFrom(methods,new)\nimportFrom(methods,setClass)\nimportFrom(methods,setGeneric)\nimportFrom(methods,setMethod)\nimportFrom(moments,kurtosis)\nimportFrom(moments,skewness)\nimportFrom(reshape2,melt)\nimportFrom(stats,aggregate)\nimportFrom(stats,coef)\nimportFrom(stats,cor)\nimportFrom(stats,ecdf)\nimportFrom(stats,lm)\nimportFrom(stats,median)\nimportFrom(stats,na.omit)\nimportFrom(stats,pgamma)\nimportFrom(stats,qgamma)\nimportFrom(stats,quantile)\nimportFrom(stats,rgamma)\nimportFrom(stats,var)\nimportFrom(utils,combn)\nimportFrom(utils,packageDescription)\nimportFrom(utils,read.csv)\nimportFrom(utils,read.fwf)\nimportFrom(utils,tail)\nimportFrom(zoo,as.Date)\n", - "created" : 1449183386056.000, - "dirty" : false, - "encoding" : "ASCII", - "folds" : "", - "hash" : "547348008", - "id" : "9CDBC212", - "lastKnownWriteTime" : 1452416343, - "path" : "E:/1/R/hyfo/NAMESPACE", - "project_path" : "NAMESPACE", - "properties" : { - }, - "relative_order" : 5, - "source_on_save" : false, - "type" : "r_namespace" -} \ No newline at end of file diff --git 
a/.Rproj.user/D53FD3E6/sdb/per/t/B74937DD b/.Rproj.user/D53FD3E6/sdb/per/t/B74937DD deleted file mode 100644 index 88f013a..0000000 --- a/.Rproj.user/D53FD3E6/sdb/per/t/B74937DD +++ /dev/null @@ -1,17 +0,0 @@ -{ - "contents" : "Package: hyfo\nType: Package\nTitle: Hydrology and Climate Forecasting\nVersion: 1.3.6\nDate: 2015-12-10\nAuthors@R: person(\"Yuanchao\", \"Xu\", email = \"xuyuanchao37@gmail.com\",\n role = c(\"aut\", \"cre\"))\nDescription: Focuses on data processing and visualization in hydrology and\n climate forecasting. Main function includes data extraction, data downscaling, data \n resampling, gap filler of precipitation, bias correction of forecasting data, flexible\n time series plot, and spatial map generation. It is a good pre-processing and \n post-processing tool for hydrological and hydraulic modellers.\nLicense: GPL-2\nDepends: \n R (>= 3.1.0),\n stats (>= 3.1.3),\n utils(>= 3.1.3),\nImports: \n ggplot2 (>= 1.0.1),\n reshape2 (>= 1.4.1),\n zoo (>= 1.7-12),\n rgdal (>= 0.8-16),\n plyr (>= 1.8.3),\n moments (>= 0.14),\n lmom (>= 2.5),\n maps(>= 2.3-9),\n maptools (>= 0.8-36),\n rgeos (>= 0.3-8),\n ncdf4 (>= 1.14.1),\n MASS (>= 7.3-39),\n methods\nSuggests: \n gridExtra,\n knitr,\n rmarkdown\nVignetteBuilder: knitr\nLazyData: true\nURL: http://yuanchao-xu.github.io/hyfo/\nBugReports: https://github.com/Yuanchao-Xu/hyfo/issues\nRepository: CRAN\n", - "created" : 1446423221493.000, - "dirty" : false, - "encoding" : "ASCII", - "folds" : "", - "hash" : "699911376", - "id" : "B74937DD", - "lastKnownWriteTime" : 1449709567, - "path" : "E:/1/R/hyfo/DESCRIPTION", - "project_path" : "DESCRIPTION", - "properties" : { - }, - "relative_order" : 3, - "source_on_save" : false, - "type" : "dcf" -} \ No newline at end of file diff --git a/.Rproj.user/D53FD3E6/sdb/per/t/B8F92D53 b/.Rproj.user/D53FD3E6/sdb/per/t/B8F92D53 deleted file mode 100644 index 2065764..0000000 --- a/.Rproj.user/D53FD3E6/sdb/per/t/B8F92D53 +++ /dev/null @@ -1,17 +0,0 @@ -{ - "contents" : 
"#' Get ensemble forecast from historical data.\n#' \n#' getHisEnsem use historical data as the forecasting input time series.\n#' \n#' @param TS A time series dataframe, with first column Date, and second column value.\n#' @param example A vector containing two strings showing the start and end date, which represent the \n#' forecasting period. Check details for more information.\n#'\n#' the program will extract every possible period in TS you provided to generate the ensemble. Check details for \n#' more information.\n#' @param interval A number representing the interval of each ensemble member. NOTE: \"interval\" takes\n#' 365 as a year, and 30 as a month, regardless of leap year and months with 31 days. So if you want the interval \n#' to be 2 years, set \\code{interval = 730}, which equals 2 * 365 ; if two months, set \\code{interval = 60}; \n#' 2 days, \\code{interval = 2}, for other numbers that cannot be divided by 365 or 30 without remainder, it will treat the \n#' number as days.By defualt interval is set to be 365, a year.\n#' @param buffer A number showing how many days are used as buffer period for models. Check details for more\n#' information.\n#' \n#' @param plot A string showing whether the plot will be shown, e.g., 'norm' means normal plot (without any process), \n#' 'cum' means cummulative plot, default is 'norm'. For other words there will be no plot.\n#' @param output A string showing which type of output you want. Default is \"data\", if \"ggplot\", the \n#' data that can be directly plotted by ggplot2 will be returned, which is easier for you to make series\n#' plots afterwards. 
NOTE: If \\code{output = 'ggplot'}, the missing value in the data will\n#' be replaced by \\code{mv}, if assigned, default mv is 0.\n#' \n#' @param name If \\code{output = 'ggplot'}, name has to be assigned to your output, in order to differentiate\n#' different outputs in the later multiplot using \\code{getEnsem_comb}.\n#' \n#' @param mv A number showing representing the missing value. When calculating the cumulative value, \n#' missing value will be replaced by mv, default is 0.\n#' @param ... \\code{title, x, y} showing the title and x and y axis of the plot. e.g. \\code{title = 'aaa'}\n#' \n#' @details \n#' \n#' \\code{example} E.g., if you have a time series from 2000 to 2010. Assuming you are in 2003,\n#' you want to forecast the period from 2003-2-1 to 2003-4-1. Then for each year in your input\n#' time series, every year from 1st Feb to 1st Apr will be extracted to generate the ensemble\n#' forecasts. In this case your input example should be \\code{example = c('2003-2-1', '2003-4-1')}\n#' \n#' \\code{interval} doesn't care about leap year and the months with 31 days, it will take 365 as a year, and 30 as a month.\n#' e.g., if the interval is from 1999-2-1 to 1999-3-1, you should just set interval to 30, although the real interval is 28\n#' days.\n#' \n#' \\code{example} and \\code{interval} controls how the ensemble will be generated. e.g. if the time series is from \n#' 1990-1-1 to 2001-1-1.\n#' \n#' if \\code{example = c('1992-3-1', '1994-1-1')} and \\code{interval = 1095}, note, 1095 = 365 * 3, so the program treat\n#' this as 3 years.\n#' \n#' Then you are supposed to get the ensemble consisting of following part:\n#' \n#' 1. 1992-3-1 to 1994-1-1 first one is the example, and it's NOT start from 1990-3-1.\n#' 2. 1995-3-1 to 1997-1-1 second one starts from 1993, because \"interval\" is 3 years.\n#' 3. 
1998-3-1 to 2000-1-1\n#' \n#' because the last one \"2000-3-1 to 2002-1-1\", 2002 exceeds the original TS range, so it will not be included.\n#' \n#' Sometimes, there are leap years and months with 31 days included in some ensemble part, in which case the length of the data will\n#' be different, e.g., 1999-1-1 to 1999-3-1 is 1 day less than 2000-1-1 to 2000-3-1. In this situation,\n#' the data will use example as a standard. If the example is 1999-1-1 to 1999-3-1, then the latter one\n#' will be changed to 2001-1-1 to 2000-2-29, which keeps the start Date and change the end Date.\n#' \n#' If the end date is so important that cannot be changed, try to solve this problem by resetting\n#' the example period, to make the event included in the example.\n#' \n#' Good set of example and interval can generate good ensemble.\n#' \n#' \\code{buffer}\n#' Sometimes the model needs to run for a few days to warm up, before the forecast. E.g., if a forecast starts at\n#' '1990-1-20', for some model like MIKE NAM model, the run needs to be started about 14 days. So the input timeseries\n#' should start from '1990-1-6'.\n#' \n#' Buffer is mainly used for the model hotstart. Sometimes the hot start file cannot contain all the parameters needed,\n#' only some important parameters. In this case, the model needs to run for some time, to make other parameters ready\n#' for the simulation.\n#' \n#' \n#' \\code{name}\n#' Assuming you have two ggplot outputs, you want to plot them together. In this situation, you\n#' need a name column to differentiate one ggplot output from the other. 
You can assigne this name\n#' by the argument directly, name has to be assigned if \\code{output = 'ggplot'} is selected,\n#' @return A ensemble time series using historical data as forecast.\n#' \n#' @examples\n#' \n#' data(testdl)\n#' \n#' a <- testdl[[1]]\n#' \n#' # Choose example from \"1994-2-4\" to \"1996-1-4\"\n#' b <- getHisEnsem(a, example = c('1994-2-4', '1996-1-4'))\n#' \n#' # Default interval is one year, can be set to other values, check help for information.\n#' \n#' # Take 7 months as interval\n#' b <- getHisEnsem(a, example = c('1994-2-4', '1996-1-4'), interval = 210, plot = 'cum') \n#' # Take 30 days as buffer\n#' b <- getHisEnsem(a, example = c('1994-2-4', '1996-1-4'), interval = 210, buffer = 30)\n#' \n#' \n#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/\n#' \n#' \n#' @importFrom reshape2 melt \n#' @importFrom grDevices rainbow\n#' @import ggplot2\n#' @references \n#' \n#' \\itemize{\n#' \\item Hadley Wickham (2007). Reshaping Data with the reshape Package. Journal of Statistical Software,\n#' 21(12), 1-20. URL http://www.jstatsoft.org/v21/i12/.\n#' \\item H. Wickham. ggplot2: elegant graphics for data analysis. Springer New York, 2009.\n#' }\n#' \n#' \n#' @export\n\ngetHisEnsem <- function (TS, example, interval = 365, buffer = 0, plot = 'norm', output = 'data', \n name = NULL, mv = 0, ...) 
{\n if (!grepl('-|/', TS[1, 1])) {\n stop('First column is not date or Wrong Date formate, check the format in ?as.Date{base} \n and use as.Date to convert.')\n } else if (!grepl('-|/', example[1]) | !grepl('-|/', example[1])) {\n stop('Wrong date format in the example, check the format in ?as.Date{base} \n and use as.Date to convert.')\n } else {\n \n \n \n TS[, 1] <- as.Date(TS[, 1])\n example <- as.Date(example ,tz = '')\n exL <- example[2] - example[1]\n # Test if example is in the range of the TS\n a <- which(TS[, 1] == example[1] | TS[, 1] == example[2])\n if (length(a) < 2) stop('Example is out of the time series, reset example.')\n \n \n \n if (interval %% 365 == 0) {\n d <- interval / 365\n \n # Get sequence of start and end date.\n \n startDate <- rev(seq(from = example[1], to = min(TS[, 1]), by = paste(-d, 'years')))\n endDate <- seq(from = example[2], to = max(TS[, 1]), by = paste(d, 'years'))\n\n n <- length(startDate) + length(endDate) - 1 # example is counted twice, should be subtracted. 
\n \n # Generate full start date series.\n startDate <- seq(min(startDate), length = n, by = paste(d, 'years'))\n endDate <- startDate + exL\n \n } else if (interval %% 30) {\n d <- interval / 30\n \n # Get sequence of start and end date.\n \n startDate <- rev(seq(from = example[1], to = min(TS[, 1]), by = paste(-d, 'months')))\n endDate <- seq(from = example[2], to = max(TS[, 1]), by = paste(d, 'months'))\n \n n <- length(startDate) + length(endDate) - 1\n \n startDate <- seq(min(startDate), length = n, by = paste(d, 'months'))\n endDate <- startDate + exL\n \n } else {\n d <- interval\n \n # Get sequence of start and end date.\n \n startDate <- rev(seq(from = example[1], to = min(TS[, 1]), by = paste(-d, 'days')))\n endDate <- seq(from = example[2], to = max(TS[, 1]), by = paste(d, 'days'))\n \n n <- length(startDate) + length(endDate) - 1\n \n startDate <- seq(min(startDate), length = n, by = paste(d, 'days'))\n endDate <- startDate + exL\n }\n \n data <- mapply(FUN = function(x, y) extractPeriod_dataframe(dataframe = TS, startDate = x, endDate = y),\n x = startDate, y = endDate)\n \n data <- lapply(1:n, function(x) data.frame(data[, x]))\n \n if (buffer > 0) {\n bufferStart <- example[1] - buffer\n bufferEnd <- example[1] - 1\n bufferTS <- extractPeriod_dataframe(TS, bufferStart, bufferEnd)\n \n data <- lapply(data, function(x) rbind(bufferTS, x))\n \n } else if (buffer < 0) {\n stop ('Buffer should be positive, or reset example.')\n }\n \n \n data_output <- list2Dataframe(data)\n colnames(data_output) <- c('Date', as.character(startDate))\n \n # Rearrange dataframe to make example the first column.\n ind <- match(c('Date', as.character(example[1])), colnames(data_output))\n # when use cbind, to ensure the output is also a dataframe, one inside cbind should be dataframe\n # Even output is alread a dataframe, but when ind is a single number, then output[ind] will\n # not be a dataframe, but an array.\n data_output <- cbind(data.frame(data_output[ind]), 
data_output[-ind])\n ex_date <- seq(from = example[1] - buffer, to = example[2], by = 1)\n data_output$Date <- ex_date\n colnames(data_output)[2] <- 'Observation'\n \n meanV <- apply(data_output[, 2:ncol(data_output)], MARGIN = 1, FUN = mean, na.rm = TRUE)\n \n data_output <- cbind(data.frame(Date = data_output[, 1]), Mean = meanV, \n data_output[, 2:ncol(data_output)])\n \n data_ggplot <- melt(data_output, id.var = 'Date')\n NAIndex <- is.na(data_ggplot$value)\n data_ggplot$nav <- rep(0, nrow(data_ggplot))\n data_ggplot$nav[NAIndex] <- 1\n \n if (plot == 'norm') {\n data_ggplot$value[NAIndex] <- mv\n \n } else if (plot == 'cum') {\n data_output[is.na(data_output)] <- mv\n cum <- cbind(data.frame(Date = data_output$Date), cumsum(data_output[2:ncol(data_output)]))\n \n data_ggplot <- melt(cum, id.var = 'Date')\n } else {\n stop('plot can only be \"norm\" or \"cum\", do not assign other words')\n }\n \n #generate different colors \n colors = c('brown1', 'dodgerblue3', rainbow(n = length(unique(data_ggplot$variable)) - 2,\n start = 0.1))\n \n theme_set(theme_bw())\n mainLayer <- with(data_ggplot, {\n ggplot(data = data_ggplot) +\n aes(x = Date, y = value, color = variable, group = variable) +\n geom_line(size = 0.5) +\n geom_line(data = data_ggplot[data_ggplot$variable == 'Observation', ], size = 1.6) +\n geom_line(data = data_ggplot[data_ggplot$variable == 'Mean', ], size = 1.6) +\n geom_point(data = data_ggplot[NAIndex, ], size = 3, shape = 4, color = 'black') +\n scale_color_manual(values = colors) +\n labs(empty = NULL, ...) 
+#in order to pass \"...\", arguments shouldn't be empty.\n theme(axis.text.x = element_text(size = rel(1.8)),\n axis.text.y = element_text(size = rel(1.8)),\n axis.title.x = element_text(size = rel(1.8)),\n axis.title.y = element_text(size = rel(1.8)))\n })\n print(mainLayer)\n \n if (output == 'ggplot') {\n if (is.null(name)) stop('\"name\" argument not found, \n If you choose \"ggplot\" as output, please assign a name.')\n data_ggplot$name <- rep(name, nrow(data_ggplot)) \n data_ggplot$nav <- rep(0, nrow(data_ggplot))\n data_ggplot$nav[NAIndex] <- 1\n\n return(data_ggplot)\n } else {\n return(data_output)\n }\n }\n}\n\n\n\n\n\n\n#' Extract time series from forecasting data.\n#' \n#' getFrcEnsem extract timeseries from forecasting data, if forecasting data has a member session\n#' an ensemble time sereis will be returned, if forecasting data doesn't have a member session, a singe time\n#' series will be returned.\n#' \n#' @param dataset A list containing different information, should be the result of \\code{\\link{loadNcdf}}\n#' @param cell A vector containing the locaton of the cell, e.g. c(2, 3), default is \"mean\", representing\n#' the spatially averaged value. Check details for more information.\n#' @param plot A string showing whether the plot will be shown, e.g., 'norm' means normal plot (without any process), \n#' 'cum' means cummulative plot, default is 'norm'. For other words there will be no plot.\n#' @param output A string showing which type of output you want. Default is \"data\", if \"ggplot\", the \n#' data that can be directly plotted by ggplot2 will be returned, which is easier for you to make series\n#' plots afterwards. 
NOTE: If \\code{output = 'ggplot'}, the missing value in the data will\n#' be replaced by \\code{mv}, if assigned, default mv is 0.\n#' @param name If \\code{output = 'ggplot'}, name has to be assigned to your output, in order to differentiate\n#' different outputs in the later multiplot using \\code{getEnsem_comb}.\n#' @param mv A number showing representing the missing value. When calculating the cumulative value, \n#' missing value will be replaced by mv, default is 0.\n#' @param coord A coordinate of longitude and latitude. e.g. corrd = c(lon, lat). If coord is assigned,\n#' cell argument will no longer be used.\n#' @param ... \\code{title, x, y} showing the title and x and y axis of the plot. e.g. \\code{title = 'aaa'}\n#' \n#' @details \n#' \n#' \\code{cell} representing the location of the cell, NOTE: this location means the index of the cell,\n#' IT IS NOT THE LONGITUDE AND LATITUDE. e.g., \\code{cell = c(2, 3)}, the program will take the 2nd longitude\n#' and 3rd latitude, by the increasing order. Longitude comes first.\n#' \n#' \\code{name}\n#' Assuming you have two ggplot outputs, you want to plot them together. In this situation, you\n#' need a name column to differentiate one ggplot output from the other. 
You can assigne this name\n#' by the argument directly, If name is not assigned and \\code{output = 'ggplot'} is selected, then\n#' the system time will be selected as name column.\n#' \n#' @examples \n#' \n#' filePath <- system.file(\"extdata\", \"tnc.nc\", package = \"hyfo\")\n\n#' # Then if you don't know the variable name, you can use \\code{getNcdfVar} to get variable name\n#' varname <- getNcdfVar(filePath)\n#' nc <- loadNcdf(filePath, varname)\n#' a <- getFrcEnsem(nc)\n#' \n#' # If there is no member session in the dataset, a single time sereis will be extracted.\n#' a1 <- getFrcEnsem(tgridData)\n#' \n#' \n#' # The default output is spatially averaged, if there are more than one cells in the dataset, \n#' # the mean value of the cells will be calculated. While if you are interested in special cell, \n#' # you can assign the cell value. You can also directly use longitude and latitude to extract \n#' # time series.\n#' \n#' getSpatialMap(nc, 'mean')\n#' a <- getFrcEnsem(nc, cell = c(6,2))\n#' \n#' # From the map, cell = c(6, 2) means lon = -1.4, lat = 43.2, so you can use corrd to locate\n#' # your research area and extract time series.\n#' b <- getFrcEnsem(nc, coord = c(-1.4, 43.2))\n#' \n#' \n#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/\n#' \n#' @return A ensemble time series extracted from forecating data.\n#' \n#' @import ggplot2\n#' @importFrom reshape2 melt\n#' @references \n#' \n#' \\itemize{\n#' \\item H. Wickham. ggplot2: elegant graphics for data analysis. Springer New York, 2009.\n#' \\item Hadley Wickham (2007). Reshaping Data with the reshape Package. Journal of Statistical Software,\n#' 21(12), 1-20. URL http://www.jstatsoft.org/v21/i12/.\n#' \\item Santander Meteorology Group (2015). downscaleR: Climate data manipulation and\n#' statistical downscaling. 
R package version 0.6-0.\n#' https://github.com/SantanderMetGroup/downscaleR/wiki\n#' }\n#' \n#' \n#' @export\ngetFrcEnsem <- function(dataset, cell = 'mean', plot = 'norm', output = 'data', name = NULL,\n mv = 0, coord = NULL, ...) {\n # cell should be a vector showing the location, or mean representing the loacation averaged.\n \n checkHyfo(dataset)\n \n Date <- as.Date(dataset$Dates$start)\n data <- dataset$Data\n \n # Dimension needs to be arranged. Make sure first and second dimension is lat and lon.\n data <- adjustDim(data, ref = c('lon', 'lat', 'time'))\n \n if (!is.null(coord)) {\n cell <- coord2cell(coord, dataset$xyCoords$x, dataset$xyCoords$y)\n } \n \n \n if (!any(attributes(data)$dimensions == 'member')){\n message('There is no member part in the dataset, there will be only one column of value\n returned.')\n \n if (length(cell) == 2) {\n data_ensem <- data[cell[1], cell[2], ]\n \n } else if (cell == 'mean') {\n data_ensem <- apply(data, MARGIN = 3, FUN = mean, na.rm = TRUE)\n # colnames <- 1:ncol(data_ensem)\n \n } else {\n stop('Wrong cell input, check help for information.')\n }\n \n } else {\n \n if (length(cell) == 2) {\n data_ensem <- data[cell[1], cell[2], , ]\n meanV <- apply(data_ensem, MARGIN = 1, FUN = mean, na.rm = TRUE)\n data_ensem <- data.frame('Mean' = meanV, data_ensem) \n \n } else if (cell == 'mean') {\n data_ensem <- apply(data, MARGIN = c(3, 4), FUN = mean, na.rm = TRUE)\n # colnames <- 1:ncol(data_ensem)\n meanV <- apply(data_ensem, MARGIN = 1, FUN = mean, na.rm = TRUE)\n data_ensem <- data.frame('Mean' = meanV, data_ensem)\n \n } else {\n stop('Wrong cell input, check help for information.')\n }\n }\n\n \n data_output <- data.frame(Date, data_ensem)\n data_ggplot <- melt(data_output, id.var = 'Date')\n NAIndex <- is.na(data_ggplot$value)\n \n \n if (plot == 'norm') {\n data_ggplot$value[NAIndex] <- mv\n } else if (plot == 'cum') {\n data_output[is.na(data_output)] <- mv\n cum <- cbind(data.frame(Date = data_output$Date), 
cumsum(data_output[2:ncol(data_output)]))\n \n data_ggplot <- melt(cum, id.var = 'Date')\n \n }\n \n colors = c('brown1', rainbow(n = length(unique(data_ggplot$variable)) - 1,\n start = 0.1))\n \n theme_set(theme_bw())\n mainLayer <- with(data_ggplot, {\n ggplot(data = data_ggplot) +\n aes(x = Date, y = value, color = variable) +\n geom_line(size = 0.5) +\n geom_line(data = data_ggplot[data_ggplot$variable == 'Mean', ], size = 1.6, color = 'red') +\n geom_point(data = data_ggplot[NAIndex, ], size = 2, shape = 4, color = 'black') +\n scale_color_manual(values = colors) +\n theme(axis.text.x = element_text(size = rel(1.8)),\n axis.text.y = element_text(size = rel(1.8)),\n axis.title.x = element_text(size = rel(1.8)),\n axis.title.y = element_text(size = rel(1.8))) +\n labs(empty = NULL, ...)#in order to pass \"...\", arguments shouldn't be empty.\n \n })\n print(mainLayer)\n \n if (output == 'ggplot') {\n if (is.null(name)) stop('\"name\" argument not found, \n If you choose \"ggplot\" as output, please assign a name.')\n \n data_ggplot$name <- rep(name, nrow(data_ggplot)) \n data_ggplot$nav <- rep(0, nrow(data_ggplot))\n data_ggplot$nav[NAIndex] <- 1\n return(data_ggplot)\n } else {\n return(data_output)\n }\n}\n\n\n\n#' Combine ensembles together\n#' @param ... different ensembles generated by \\code{getHisEnsem(, output = 'ggplot')} \n#' or \\code{getFrcEnsem(, output = 'ggplot')}, see details.\n#' @param nrow A number showing the number of rows.\n#' @param list If input is a list containing different ggplot data, use \\code{list = inputlist}.\n#' @param legend A boolean representing whether you want the legend. 
Sometimes when you combine\n#' plots, there will be a lot of legends, if you don't like it, you can turn it off by setting\n#' \\code{legend = FALSE}, default is TRUE.\n#' @param x A string of x axis name.\n#' @param y A string of y axis name.\n#' @param title A string of the title.\n#' @param output A boolean, if chosen TRUE, the output will be given.\n#' @return A combined ensemble plot.\n#' @examples \n#' \n#' data(testdl)\n#' \n#' a <- testdl[[1]]\n#' \n#' # Choose example from \"1994-2-4\" to \"1996-1-4\"\n#' \n#' \n#' b1<- getHisEnsem(a, example = c('1995-2-4', '1996-1-4'), plot = 'cum', output = 'ggplot',\n#' name = 1)\n#' \n#' b2 <- getHisEnsem(a, example = c('1995-4-4', '1996-3-4'), plot = 'cum', output = 'ggplot',\n#' name = 2)\n#' \n#' getEnsem_comb(b1, b2)\n#' getEnsem_comb(list = list(b1, b2), nrow = 2)\n#' \n#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/\n#' \n#' \n#' @export\n#' @import ggplot2\n#' @references \n#' \n#' \\itemize{\n#' \\item H. Wickham. ggplot2: elegant graphics for data analysis. Springer New York, 2009.\n#' \\item Santander Meteorology Group (2015). downscaleR: Climate data manipulation and\n#' statistical downscaling. 
R package version 0.6-0.\n#' https://github.com/SantanderMetGroup/downscaleR/wiki\n#' }\n#' \n#' \n#' \n\ngetEnsem_comb <- function(..., list = NULL, nrow = 1, legend = TRUE, x = '', y = '', title = '', \n output = FALSE) {\n \n if (!is.null(list)) {\n checkBind(list, 'rbind')\n data_ggplot <- do.call('rbind', list)\n } else {\n plots <- list(...)\n checkBind(plots, 'rbind')\n data_ggplot <- do.call('rbind', plots)\n } \n #data_ggplot$name <- factor(data_ggplot$name, levels = data_ggplot$name, ordered = TRUE)\n \n if (!class(data_ggplot) == 'data.frame') {\n warning('Your input is probably a list, but you forget to add \"list = \" before it.\n Try again, or check help for more information.')\n } else if (is.null(data_ggplot$name)) {\n stop('No \"Name\" column in the input data, check the arguments in getFreEnsem() or getHisEnsem(), if \n output = \"ggplot\" is assigned, more info please check ?getFreEnsem() or ?getHisEnsem().')\n }\n \n colors = c('brown1', 'dodgerblue3', rainbow(n = length(unique(data_ggplot$variable)) - 2,\n start = 0.1))\n \n theme_set(theme_bw())\n mainLayer <- with(data_ggplot, {\n ggplot(data = data_ggplot) +\n aes(x = Date, y = value, color = variable) +\n geom_line(size = 0.5) +\n geom_line(data = data_ggplot[data_ggplot$variable == 'Mean', ], size = 1.6) +\n geom_line(data = data_ggplot[data_ggplot$variable == 'Observation', ], size = 1.6) +\n geom_point(data = data_ggplot[data_ggplot$nav == 1, ], size = 2, shape = 4, color = 'black') +\n scale_color_manual(values = colors) +\n theme(axis.text.x = element_text(size = rel(1.8)),\n axis.text.y = element_text(size = rel(1.8)),\n axis.title.x = element_text(size = rel(1.8)),\n axis.title.y = element_text(size = rel(1.8))) +\n facet_wrap( ~ name, nrow = nrow) +\n labs(x = x, y = y, title = title)\n \n })\n if (legend == FALSE) {\n mainLayer <- mainLayer + \n theme(legend.position = 'none')\n# following ones are to add label, may be added in future.\n# geom_text(data = 
data_ggplot[data_ggplot$Date == '2003-12-10', ], aes(label = variable), hjust = 0.7, vjust = 1)\n# geom_text(data = data_ggplot[data_ggplot$variable == 'Mean', ], aes(label = variable), hjust = 0.7, vjust = 1)\n }\n \n \n print(mainLayer)\n \n if (output == TRUE) return(data_ggplot)\n \n}", - "created" : 1449959868792.000, - "dirty" : false, - "encoding" : "ASCII", - "folds" : "", - "hash" : "3391383568", - "id" : "B8F92D53", - "lastKnownWriteTime" : 1449680802, - "path" : "E:/1/R/hyfo/R/getEnsemble.R", - "project_path" : "R/getEnsemble.R", - "properties" : { - }, - "relative_order" : 12, - "source_on_save" : false, - "type" : "r_source" -} \ No newline at end of file diff --git a/.Rproj.user/D53FD3E6/sdb/per/t/BDA65B7E b/.Rproj.user/D53FD3E6/sdb/per/t/BDA65B7E deleted file mode 100644 index e8c2ac5..0000000 --- a/.Rproj.user/D53FD3E6/sdb/per/t/BDA65B7E +++ /dev/null @@ -1,17 +0,0 @@ -{ - "contents" : "\n\n\n\n#' Get bias factor for multi/operational/real time bias correction.\n#' \n#' When you do multi/operational/real time bias correction. It's too expensive\n#' to input hindcast and obs every time. Especially when you have a long period of hindcast\n#' and obs, but only a short period of frc, it's too unecessary to read and compute hindcast\n#' and obs everytime. Therefore, biasFactor is designed. Using \\code{getBiasFactor}, you can\n#' get the biasFactor with hindcast and observation, then you can use \\code{applyBiasFactor} to \n#' apply the biasFactor to different forecasts. \n#' \n#' @param hindcast a hyfo grid data output or a dataframe(time series) consists of Date column and one or more value columns, \n#' representing the hindcast data. This data will be used in the calibration of the forecast, so it's better to have the same date period as\n#' observation data. 
Check details for more information.\n#' @param obs a hyfo grid data output or a dataframe (time series) consists of Date column and one or more value columns, \n#' representing the observation data.\n#' @param method bias correct method, including 'delta', 'scaling'...,default method is 'scaling'.\n#' @param scaleType only when the method \"scaling\" is chosen, scaleType will be available. Two different types\n#' of scaling method, 'add' and 'multi', which means additive and multiplicative scaling method, default is 'multi'. More info check \n#' details.\n#' @param preci If the precipitation is biascorrected, then you have to assign \\code{preci = TRUE}. Since for\n#' precipitation, some biascorrect methods may not apply to, or some methods are specially for precipitation. \n#' Default is FALSE, refer to details.\n#' @param prThreshold The minimum value that is considered as a non-zero precipitation. Default to 1 (assuming mm).\n#' @param extrapolate When use 'eqm' method, and 'no' is set, modified frc is bounded by the range of obs.\n#' If 'constant' is set, modified frc is not bounded by the range of obs. Default is 'no'.\n#' \n#' @seealso \\code{\\link{biasCorrect}} for method used in bias correction.\n#' \\code{\\link{applyBiasFactor}}, for the second part.\n#' \n#' @details \n#' \n#' Information about the method and how biasCorrect works can be found in \\code{\\link{biasCorrect}}\n#' \n#' \\strong{why use biasFactor}\n#' \n#' As for forecasting, for daily data, there is usually no need to have\n#' different bias factor every different day. You can calculate one bisa factor using a long\n#' period of hindcast and obs, and apply that factor to different frc.\n#' \n#' For example,\n#' \n#' You have 10 years of hindcast and observation. you want to do bias correction for some \n#' forecasting product, e.g. system 4. For system 4, each month, you will get a new forecast\n#' about the future 6 months. 
So if you want to do the real time bias correction, you have to\n#' take the 10 years of hindcast and observation data with you, and run \\code{biasCorrect} every\n#' time you get a new forecast. That's too expensive.\n#' \n#' For some practical use in forecasting, there isn't a so high demand for accuracy. E.g.,\n#' Maybe for February and March, you can use the same biasFactor, no need to do the computation \n#' again. \n#' \n#' It is a generic function, if in your case you need to debug, please see \\code{?debug()} \n#' for how to debug S4 method. \n#' \n#' @examples \n#' \n#' ######## hyfo grid file biascorrection\n#' ########\n#' \n#' # If your input is obtained by \\code{loadNcdf}, you can also directly biascorrect\n#' # the file.\n#' \n#' # First load ncdf file.\n#' filePath <- system.file(\"extdata\", \"tnc.nc\", package = \"hyfo\")\n#' varname <- getNcdfVar(filePath) \n#' nc <- loadNcdf(filePath, varname)\n#' \n#' data(tgridData)\n#' # Since the example data, has some NA values, the process will include some warning #message, \n#' # which can be ignored in this case.\n#' \n#' \n#' \n#' # Then we will use nc data as forecasting data, and use itself as hindcast data,\n#' # use tgridData as observation.\n#' \n#' biasFactor <- getBiasFactor(nc, tgridData)\n#' newFrc <- applyBiasFactor(nc, biasFactor)\n#' \n#' biasFactor <- getBiasFactor(nc, tgridData, method = 'eqm', extrapolate = 'constant',\n#' preci = TRUE)\n#' # This method needs obs input.\n#' newFrc <- applyBiasFactor(nc, biasFactor, obs = tgridData)\n#' \n#' biasFactor <- getBiasFactor(nc, tgridData, method = 'gqm', preci = TRUE)\n#' newFrc <- applyBiasFactor(nc, biasFactor) \n#' \n#' \n#' ######## Time series biascorrection\n#' ########\n#' \n#' # Use the time series from testdl as an example, we take frc, hindcast and obs from testdl.\n#' data(testdl)\n#' \n#' # common period has to be extracted in order to better train the forecast.\n#' \n#' datalist <- extractPeriod(testdl, startDate = '1994-1-1', 
endDate = '1995-10-1')\n#' \n#' frc <- datalist[[1]]\n#' hindcast <- datalist[[2]]\n#' obs <- datalist[[3]]\n#' \n#' \n#' # The data used here is just for example, so there could be negative data.\n#' \n#' # default method is scaling\n#' biasFactor <- getBiasFactor(hindcast, obs)\n#' frc_new <- applyBiasFactor(frc, biasFactor)\n#' \n#' # for precipitation data, extra process needs to be executed, so you have to tell\n#' # the program to it is a precipitation data.\n#' \n#' biasFactor <- getBiasFactor(hindcast, obs, preci = TRUE)\n#' frc_new1 <- applyBiasFactor(frc, biasFactor)\n#' \n#' # You can use other methods to biascorrect, e.g. delta method. \n#' biasFactor <- getBiasFactor(hindcast, obs, method = 'delta')\n#' # delta method needs obs input.\n#' frc_new2 <- applyBiasFactor(frc, biasFactor, obs = obs)\n#' \n#' # \n#' biasFactor <- getBiasFactor(hindcast, obs, method = 'eqm', preci = TRUE)\n#' # eqm needs obs input\n#' frc_new3 <- applyBiasFactor(frc, biasFactor, obs = obs)\n#' \n#' biasFactor <- getBiasFactor(hindcast, obs, method = 'gqm', preci = TRUE)\n#' frc_new4 <- applyBiasFactor(frc, biasFactor)\n#' \n#' plotTS(obs, frc, frc_new, frc_new1, frc_new2, frc_new3, frc_new4, plot = 'cum')\n#' \n#' # You can also give name to this input list.\n#' TSlist <- list(obs, frc, frc_new, frc_new1, frc_new2, frc_new3, frc_new4)\n#' names(TSlist) <- c('obs', 'frc', 'delta', 'delta_preci', 'scale', 'eqm', 'gqm')\n#' plotTS(list = TSlist, plot = 'cum')\n#' \n#' \n#' # If the forecasts you extracted only has incontinuous data for certain months and years, e.g.,\n#' # for seasonal forecasting, forecasts only provide 3-6 months data, so the case can be \n#' # for example Dec, Jan and Feb of every year from year 1999-2005.\n#' # In such case, you need to extract certain months and years from observed time series.\n#' # extractPeriod() can be then used.\n#' \n#' \n#'\n#'\n#'\n#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/\n#' \n#' \n#' 
@references \n#' Bias correction methods come from \\code{biasCorrection} from \\code{dowscaleR}\n#' \n#' \\itemize{\n#' \n#' \\item Santander Meteorology Group (2015). downscaleR: Climate data manipulation and statistical downscaling. R\n#' package version 0.6-0. https://github.com/SantanderMetGroup/downscaleR/wiki\n#' \n#' \\item R.A.I. Wilcke, T. Mendlik and A. Gobiet (2013) Multi-variable error correction of regional climate models. Climatic Change, 120, 871-887\n#' \n#' \\item A. Amengual, V. Homar, R. Romero, S. Alonso, and C. Ramis (2012) A Statistical Adjustment of Regional Climate Model Outputs to Local Scales: Application to Platja de Palma, Spain. J. Clim., 25, 939-957\n#' \n#' \\item C. Piani, J. O. Haerter and E. Coppola (2009) Statistical bias correction for daily precipitation in regional climate models over Europe, Theoretical and Applied Climatology, 99, 187-192\n#' \n#' \\item O. Gutjahr and G. Heinemann (2013) Comparing precipitation bias correction methods for high-resolution regional climate simulations using COSMO-CLM, Theoretical and Applied Climatology, 114, 511-529\n#' }\n#' \n#' @author Yuanchao Xu \\email{xuyuanchao37@@gmail.com }\n#' \n#' @importFrom methods setMethod\n#' @export\n#' \n#' \n# debug by trace(\"getBiasFactor\", browser, exit=browser, signature = c(\"list\", \"list\"))\nsetGeneric('getBiasFactor', function(hindcast, obs, method = 'scaling', scaleType = 'multi', \n preci = FALSE, prThreshold = 0, extrapolate = 'no') {\n standardGeneric('getBiasFactor')\n})\n\n#' @describeIn getBiasFactor\nsetMethod('getBiasFactor', signature('data.frame', 'data.frame'), \n function(hindcast, obs, method, scaleType, preci, prThreshold, extrapolate) {\n result <- getBiasFactor.TS(hindcast, obs, method, scaleType, preci, prThreshold, extrapolate)\n return(result)\n })\n\n\n# This is for the grid file from downscaleR\n#' @describeIn getBiasFactor\n#' @importFrom methods new\nsetMethod('getBiasFactor', signature('list', 'list'), \n 
function(hindcast, obs, method, scaleType, preci, prThreshold, extrapolate) {\n result <- getBiasFactor.list(hindcast, obs, method, scaleType, preci, prThreshold, extrapolate)\n return(result)\n })\n\n\n\n\n#' Apply bias factor to different forecasts for multi/operational/real time bias correction.\n#' \n#' When you do multi/operational/real time bias correction. It's too expensive\n#' to input hindcast and obs every time. Especially when you have a long period of hindcast\n#' and obs, but only a short period of frc, it's too unecessary to read and compute hindcast\n#' and obs everytime. Therefore, biasFactor is designed. Using \\code{getBiasFactor}, you can\n#' get the biasFactor with hindcast and observation, then you can use \\code{applyBiasFactor} to \n#' apply the biasFactor to different forecasts. \n#' \n#' @param frc a hyfo grid data output or a dataframe(time series) consists of Date column and one or more value columns, \n#' representing the frc data. Check details for more information.\n#' @param biasFactor a file containing all the information of the calibration, will be\n#' applied to different forecasts.\n#' @param obs for some methods, observation input is necessary. obs is a hyfo grid data output or a dataframe (time series) consists of Date column and one or more value columns, \n#' representing the observation data. Default value is NULL.\n#' @seealso \\code{\\link{biasCorrect}} for method used in bias correction. \n#' \\code{\\link{getBiasFactor}}, for the first part.\n#' \n#' @details \n#' \n#' Information about the method and how biasCorrect works can be found in \\code{\\link{biasCorrect}}\n#' \n#' \\strong{why use biasFactor}\n#' \n#' As for forecasting, for daily data, there is usually no need to have\n#' different bias factor every different day. You can calculate one bisa factor using a long\n#' period of hindcast and obs, and apply that factor to different frc.\n#' \n#' For example,\n#' \n#' You have 10 years of hindcast and observation. 
you want to do bias correction for some \n#' forecasting product, e.g. system 4. For system 4, each month, you will get a new forecast\n#' about the future 6 months. So if you want to do the real time bias correction, you have to\n#' take the 10 years of hindcast and observation data with you, and run \\code{biasCorrect} every\n#' time you get a new forecast. That's too expensive.\n#' \n#' For some practical use in forecasting, there isn't a so high demand for accuracy. E.g.,\n#' Maybe for February and March, you can use the same biasFactor, no need to do the computation \n#' again. \n#' \n#' \n#' It is a generic function, if in your case you need to debug, please see \\code{?debug()} \n#' for how to debug S4 method.\n#' \n#' @examples \n#' \n#' ######## hyfo grid file biascorrection\n#' ########\n#' \n#' # If your input is obtained by \\code{loadNcdf}, you can also directly biascorrect\n#' # the file.\n#' \n#' # First load ncdf file.\n#' filePath <- system.file(\"extdata\", \"tnc.nc\", package = \"hyfo\")\n#' varname <- getNcdfVar(filePath) \n#' nc <- loadNcdf(filePath, varname)\n#' \n#' data(tgridData)\n#' #' # Since the example data, has some NA values, the process will include some warning #message, \n#' # which can be ignored in this case.\n#' \n#' \n#' \n#' # Then we will use nc data as forecasting data, and use itself as hindcast data,\n#' # use tgridData as observation.\n#' \n#' biasFactor <- getBiasFactor(nc, tgridData)\n#' newFrc <- applyBiasFactor(nc, biasFactor)\n#' \n#' biasFactor <- getBiasFactor(nc, tgridData, method = 'eqm', extrapolate = 'constant',\n#' preci = TRUE)\n#' # This method needs obs input.\n#' newFrc <- applyBiasFactor(nc, biasFactor, obs = tgridData)\n#' \n#' biasFactor <- getBiasFactor(nc, tgridData, method = 'gqm', preci = TRUE)\n#' newFrc <- applyBiasFactor(nc, biasFactor) \n#' \n#' \n#' ######## Time series biascorrection\n#' ########\n#' \n#' # Use the time series from testdl as an example, we take frc, hindcast and obs from 
testdl.\n#' data(testdl)\n#' \n#' # common period has to be extracted in order to better train the forecast.\n#' \n#' datalist <- extractPeriod(testdl, startDate = '1994-1-1', endDate = '1995-10-1')\n#' \n#' frc <- datalist[[1]]\n#' hindcast <- datalist[[2]]\n#' obs <- datalist[[3]]\n#' \n#' \n#' # The data used here is just for example, so there could be negative data.\n#' \n#' # default method is scaling\n#' biasFactor <- getBiasFactor(hindcast, obs)\n#' frc_new <- applyBiasFactor(frc, biasFactor)\n#' \n#' # for precipitation data, extra process needs to be executed, so you have to tell\n#' # the program to it is a precipitation data.\n#' \n#' biasFactor <- getBiasFactor(hindcast, obs, preci = TRUE)\n#' frc_new1 <- applyBiasFactor(frc, biasFactor)\n#' \n#' # You can use other methods to biascorrect, e.g. delta method. \n#' biasFactor <- getBiasFactor(hindcast, obs, method = 'delta')\n#' # delta method needs obs input.\n#' frc_new2 <- applyBiasFactor(frc, biasFactor, obs = obs)\n#' \n#' # \n#' biasFactor <- getBiasFactor(hindcast, obs, method = 'eqm', preci = TRUE)\n#' # eqm needs obs input\n#' frc_new3 <- applyBiasFactor(frc, biasFactor, obs = obs)\n#' \n#' biasFactor <- getBiasFactor(hindcast, obs, method = 'gqm', preci = TRUE)\n#' frc_new4 <- applyBiasFactor(frc, biasFactor)\n#' \n#' plotTS(obs, frc, frc_new, frc_new1, frc_new2, frc_new3, frc_new4, plot = 'cum')\n#' \n#' # You can also give name to this input list.\n#' TSlist <- list(obs, frc, frc_new, frc_new1, frc_new2, frc_new3, frc_new4)\n#' names(TSlist) <- c('obs', 'frc', 'delta', 'delta_preci', 'scale', 'eqm', 'gqm')\n#' plotTS(list = TSlist, plot = 'cum')\n#' \n#' \n#' # If the forecasts you extracted only has incontinuous data for certain months and years, e.g.,\n#' # for seasonal forecasting, forecasts only provide 3-6 months data, so the case can be \n#' # for example Dec, Jan and Feb of every year from year 1999-2005.\n#' # In such case, you need to extract certain months and years from observed 
time series.\n#' # extractPeriod() can be then used.\n#' \n#' \n#'\n#'\n#'\n#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/\n#' \n#' \n#' @references \n#' Bias correction methods come from \\code{biasCorrection} from \\code{dowscaleR}\n#' \n#' \\itemize{\n#' \n#' \\item Santander Meteorology Group (2015). downscaleR: Climate data manipulation and statistical downscaling. R\n#' package version 0.6-0. https://github.com/SantanderMetGroup/downscaleR/wiki\n#' \n#' \\item R.A.I. Wilcke, T. Mendlik and A. Gobiet (2013) Multi-variable error correction of regional climate models. Climatic Change, 120, 871-887\n#' \n#' \\item A. Amengual, V. Homar, R. Romero, S. Alonso, and C. Ramis (2012) A Statistical Adjustment of Regional Climate Model Outputs to Local Scales: Application to Platja de Palma, Spain. J. Clim., 25, 939-957\n#' \n#' \\item C. Piani, J. O. Haerter and E. Coppola (2009) Statistical bias correction for daily precipitation in regional climate models over Europe, Theoretical and Applied Climatology, 99, 187-192\n#' \n#' \\item O. Gutjahr and G. 
Heinemann (2013) Comparing precipitation bias correction methods for high-resolution regional climate simulations using COSMO-CLM, Theoretical and Applied Climatology, 114, 511-529\n#' }\n#' \n#' @author Yuanchao Xu \\email{xuyuanchao37@@gmail.com }\n#' \n#' @export\nsetGeneric('applyBiasFactor', function(frc, biasFactor, obs = NULL) {\n standardGeneric('applyBiasFactor')\n})\n\n#' @describeIn applyBiasFactor\n#' @importFrom methods setMethod\nsetMethod('applyBiasFactor', signature('data.frame', 'biasFactor'), \n function(frc, biasFactor, obs) {\n result <- applyBiasFactor.TS(frc, biasFactor, obs)\n return(result)\n })\n \n#' @describeIn applyBiasFactor\n#' @importFrom methods setMethod\nsetMethod('applyBiasFactor', signature('list', 'biasFactor.hyfo'), \n function(frc, biasFactor, obs) {\n result <- applyBiasFactor.list(frc, biasFactor, obs)\n return(result)\n })\n\n\n### generic functions\ngetBiasFactor.TS <- function(hindcast, obs, method, scaleType, preci, prThreshold, extrapolate) {\n \n if (!grepl('-|/', obs[1, 1]) | !grepl('-|/', hindcast[1, 1])) {\n stop('First column is not date or Wrong Date formate, check the format in ?as.Date{base} \n and use as.Date to convert.If your input is a hyfo dataset, put input = \"hyfo\" as an\n argument, check help for more info.')\n }\n \n # change to date type is easier, but in case in future the flood part is added, Date type doesn't have\n # hour, min and sec, so, it's better to convert it into POSIxlt.\n \n # if condition only accepts one condition, for list comparison, there are a lot of conditions, better\n # further process it, like using any.\n if (any(as.POSIXlt(hindcast[, 1]) != as.POSIXlt(obs[, 1]))) {\n warning('time of obs and time of hindcast are not the same, which may cause inaccuracy in \n the calibration.')\n }\n n <- ncol(hindcast)\n \n # For every column, it's biascorrected respectively.\n biasFactor <- lapply(2:n, function(x) getBiasFactor_core(hindcast[, x], obs[, 2], method = method,\n scaleType = 
scaleType, preci = preci, prThreshold = prThreshold, \n extrapolate = extrapolate))\n if (n - 1 > 1) {\n biasFactor_all <- new('biasFactor.multiMember', biasFactor = biasFactor, memberDim = n - 1,\n method = method, preci = preci, prThreshold = prThreshold, scaleType = scaleType, \n extrapolate = extrapolate)\n \n } else {\n biasFactor_all <- new('biasFactor', biasFactor = biasFactor, method = method, \n preci = preci, prThreshold = prThreshold, scaleType = scaleType, \n extrapolate = extrapolate)\n }\n \n return(biasFactor_all)\n}\n\ngetBiasFactor.list <- function(hindcast, obs, method, scaleType, preci, prThreshold, extrapolate) {\n \n ## Check if the data is a hyfo grid data.\n checkHyfo(hindcast, obs)\n \n hindcastData <- hindcast$Data\n obsData <- obs$Data\n \n ## save frc dimension order, at last, set the dimension back to original dimension\n hindcastDim <- attributes(hindcastData)$dimensions\n \n ## ajust the dimension into general dimension order.\n obsData <- adjustDim(obsData, ref = c('lon', 'lat', 'time'))\n \n ## CheckDimLength, check if all the input dataset has different dimension length\n # i.e. if they all have the same lon and lat number.\n checkDimLength(hindcastData, obsData, dim = c('lon', 'lat'))\n \n \n # Now real bias correction is executed.\n \n memberIndex <- grepAndMatch('member', attributes(hindcastData)$dimensions)\n \n # For dataset that has a member part \n if (!is.na(memberIndex)) {\n \n hindcastData <- adjustDim(hindcastData, ref = c('lon', 'lat', 'time', 'member'))\n \n # The following code may speed up because it doesn't use for loop.\n # It firstly combine different array into one array. combine the time \n # dimension of frc, hindcast and obs. Then use apply, each time extract \n # the total time dimension, and first part is frc, second is hindcast, third\n # is obs. Then use these three parts to bias correct. All above can be written\n # in one function and called within apply. 
But too complicated to understand,\n # So save it for future use maybe.\n \n # for (member in 1:dim(frcData)[4]) {\n # totalArr <- array(c(frcData[,,, member], hindcastData[,,, member], obsData),\n # dim = c(dim(frcData)[1], dim(frcData)[2], \n # dim(frcData)[3] + dim(hindcastData)[3] + dim(obsData)[3]))\n # }\n \n biasFactor_all <- vector(mode = \"list\", length = dim(hindcastData)[4])\n for (member in 1:dim(hindcastData)[4]) {\n biasFactor_all[[member]] <- vector(mode = 'list', length = dim(hindcastData)[1])\n for (lon in 1:dim(hindcastData)[1]) {\n biasFactor_all[[member]][[lon]] <- vector(mode = 'list', length = dim(hindcastData)[2])\n for (lat in 1:dim(hindcastData)[2]) {\n biasFactor_all[[member]][[lon]][[lat]] <- getBiasFactor_core(hindcastData[lon, lat,, member], obsData[lon, lat,], method = method,\n scaleType = scaleType, preci = preci, prThreshold = prThreshold, \n extrapolate = extrapolate)\n }\n }\n }\n \n biasFactor <- new('biasFactor.hyfo', biasFactor = biasFactor_all, method = method, preci = preci,\n prThreshold = prThreshold, scaleType = scaleType, extrapolate = extrapolate, \n lonLatDim = calcuDim(hindcastData, dim = c('lon', 'lat')),\n memberDim = calcuDim(hindcastData, dim = 'member'))\n } else {\n \n hindcastData <- adjustDim(hindcastData, ref = c('lon', 'lat', 'time'))\n \n biasFactor_all <- vector(mode = 'list', length = dim(hindcastData)[1])\n for (lon in 1:dim(hindcastData)[1]) {\n biasFactor_all[[lon]] <- vector(mode = 'list', length = dim(hindcastData)[2]) \n for (lat in 1:dim(hindcastData)[2]) {\n biasFactor_all[[lon]][[lat]] <- getBiasFactor_core(hindcastData[lon, lat,], obsData[lon, lat,], method = method,\n scaleType = scaleType, preci = preci, prThreshold = prThreshold, \n extrapolate = extrapolate)\n }\n }\n biasFactor <- new('biasFactor.hyfo', biasFactor = biasFactor_all, method = method, preci = preci,\n prThreshold = prThreshold, scaleType = scaleType, extrapolate = extrapolate, \n lonLatDim = calcuDim(hindcastData, dim = 
c('lon', 'lat')))\n \n }\n \n return(biasFactor)\n}\n\napplyBiasFactor.TS <- function(frc, biasFactor, obs) {\n method <- biasFactor@method\n preci <- biasFactor@preci\n prThreshold <- biasFactor@prThreshold\n scaleType <- biasFactor@scaleType\n extrapolate <- biasFactor@extrapolate\n memberDim <- biasFactor@memberDim\n biasFactor <- biasFactor@biasFactor\n \n \n # First check if the first column is Date\n if (!grepl('-|/', frc[1, 1])) {\n stop('First column is not date or Wrong Date formate, check the format in ?as.Date{base} \n and use as.Date to convert.If your input is a hyfo dataset, put input = \"hyfo\" as an\n argument, check help for more info.')\n }\n # change to date type is easier, but in case in future the flood part is added, Date type doesn't have\n # hour, min and sec, so, it's better to convert it into POSIxlt.\n \n # In this case more than one value columns exist in the dataset, both frc and hindcast.\n \n n <- ncol(frc)\n if (n-1 != memberDim) stop('frc and biasFactor have different members.')\n \n \n # For every column, it's biascorrected respectively.\n frc_data <- lapply(2:n, function(x) applyBiasFactor_core(frc[, x], biasFactor = biasFactor[[x - 1]], method = method,\n scaleType = scaleType, preci = preci, prThreshold = prThreshold, \n extrapolate = extrapolate, obs = obs[, 2]))\n frc_data <- do.call('cbind', frc_data)\n rownames(frc_data) <- NULL\n \n names <- colnames(frc)\n frc_new <- data.frame(frc[, 1], frc_data)\n colnames(frc_new) <- names\n \n return(frc_new)\n \n}\n\napplyBiasFactor.list <- function(frc, biasFactor, obs) {\n method <- biasFactor@method\n preci <- biasFactor@preci\n prThreshold <- biasFactor@prThreshold\n scaleType <- biasFactor@scaleType\n extrapolate <- biasFactor@extrapolate\n lonLatDim <- biasFactor@lonLatDim\n memberDim <- biasFactor@memberDim\n biasFactor <- biasFactor@biasFactor\n \n ## Check if the data is a hyfo grid data.\n checkHyfo(frc)\n \n \n obsData <- obs$Data\n frcData <- frc$Data\n \n ## save frc 
dimension order, at last, set the dimension back to original dimension\n frcDim <- attributes(frcData)$dimensions\n \n ## ajust the dimension into general dimension order.\n obsData <- adjustDim(obsData, ref = c('lon', 'lat', 'time'))\n \n ## CheckDimLength, check if all the input dataset has different dimension length\n # i.e. if they all have the same lon and lat number.\n if (!identical(calcuDim(frcData, dim = c('lon', 'lat')), lonLatDim)) {\n stop('frc data has different lon and lat from hindcast data.')\n }\n \n \n # Now real bias correction is executed.\n \n memberIndex <- grepAndMatch('member', attributes(frcData)$dimensions)\n \n # For dataset that has a member part \n if (!is.na(memberIndex)) {\n # check if frcData and hindcastData has the same dimension and length.\n if (calcuDim(frcData, dim = 'member') != memberDim) {\n stop('frc data has different member number from hindcast.')\n } \n \n frcData <- adjustDim(frcData, ref = c('lon', 'lat', 'time', 'member'))\n \n # The following code may speed up because it doesn't use for loop.\n # It firstly combine different array into one array. combine the time \n # dimension of frc, hindcast and obs. Then use apply, each time extract \n # the total time dimension, and first part is frc, second is hindcast, third\n # is obs. Then use these three parts to bias correct. All above can be written\n # in one function and called within apply. 
But too complicated to understand,\n # So save it for future use maybe.\n \n # for (member in 1:dim(frcData)[4]) {\n # totalArr <- array(c(frcData[,,, member], hindcastData[,,, member], obsData),\n # dim = c(dim(frcData)[1], dim(frcData)[2], \n # dim(frcData)[3] + dim(hindcastData)[3] + dim(obsData)[3]))\n # }\n \n \n for (member in 1:dim(frcData)[4]) {\n for (lon in 1:dim(frcData)[1]) {\n for (lat in 1:dim(frcData)[2]) {\n frcData[lon, lat,, member] <- applyBiasFactor_core(frcData[lon, lat,,member], biasFactor = biasFactor[[member]][[lon]][[lat]], method = method,\n scaleType = scaleType, preci = preci, prThreshold = prThreshold, \n extrapolate = extrapolate, obs = obsData[lon, lat,])\n }\n }\n }\n } else {\n frcData <- adjustDim(frcData, ref = c('lon', 'lat', 'time'))\n for (lon in 1:dim(frcData)[1]) {\n for (lat in 1:dim(frcData)[2]) {\n frcData[lon, lat,] <- applyBiasFactor_core(frcData[lon, lat,], biasFactor = biasFactor[[lon]][[lat]], method = method,\n scaleType = scaleType, preci = preci, prThreshold = prThreshold, \n extrapolate = extrapolate, obs = obsData[lon, lat,])\n }\n }\n }\n \n frcData <- adjustDim(frcData, ref = frcDim)\n frc$Data <- frcData\n frc$biasCorrected_by <- method\n frc_new <- frc\n \n return(frc_new)\n}\n\n\n#################\n################# core functions for multi bias correction.\n\n#' @importFrom MASS fitdistr\n#' @importFrom stats ecdf quantile pgamma qgamma rgamma\n#' \n#' @references \n#' Bias correction methods come from \\code{biasCorrection} from \\code{dowscaleR}\n#' \n#' \\itemize{\n#' \n#' \\item Santander Meteorology Group (2015). downscaleR: Climate data manipulation and statistical downscaling. R\n#' package version 0.6-0. https://github.com/SantanderMetGroup/downscaleR/wiki\n#' \n#' \\item R.A.I. Wilcke, T. Mendlik and A. Gobiet (2013) Multi-variable error correction of regional climate models. Climatic Change, 120, 871-887\n#' \n#' \\item A. Amengual, V. Homar, R. Romero, S. Alonso, and C. 
Ramis (2012) A Statistical Adjustment of Regional Climate Model Outputs to Local Scales: Application to Platja de Palma, Spain. J. Clim., 25, 939-957\n#' \n#' \\item C. Piani, J. O. Haerter and E. Coppola (2009) Statistical bias correction for daily precipitation in regional climate models over Europe, Theoretical and Applied Climatology, 99, 187-192\n#' \n#' \\item O. Gutjahr and G. Heinemann (2013) Comparing precipitation bias correction methods for high-resolution regional climate simulations using COSMO-CLM, Theoretical and Applied Climatology, 114, 511-529\n#' }\ngetBiasFactor_core <- function(hindcast, obs, method , scaleType, preci, prThreshold, extrapolate){\n # If the variable is precipitation, some further process needs to be added.\n # The process is taken from downscaleR, to provide a more reasonable hindcast, used in the calibration.\n \n \n # check if frc, hindcast or obs are all na values\n if (!any(!is.na(obs)) | !any(!is.na(hindcast))) {\n warning('In this cell, hindcast or obs data is missing. 
No biasCorrection for this cell.')\n return(NA)\n } \n \n if (preci == TRUE) {\n preprocessHindcast_res <- preprocessHindcast(hindcast = hindcast, obs = obs, prThreshold = prThreshold)\n hindcast <- preprocessHindcast_res[[1]]\n minHindcastPreci <- preprocessHindcast_res[[2]]\n }\n \n # default is the simplest method in biascorrection, just do simple addition and subtraction.\n if (method == 'delta') {\n biasFactor <- getBiasFactor_core_delta(hindcast)\n } else if (method == 'scaling') {\n biasFactor <- getBiasFactor_core_scaling(hindcast, obs, scaleType)\n } else if (method == 'eqm') {\n # In this method, the value is bounded by the observation\n # Preci or not both have the same biasFactor\n if (preci == FALSE) {\n biasFactor <- getBiasFactor_core_eqm_nonPreci(hindcast, obs, extrapolate)\n } else {\n biasFactor <- getBiasFactor_core_eqm_preci(hindcast, obs, minHindcastPreci, extrapolate, prThreshold)\n }\n \n \n } else if (method == 'gqm') {\n if (preci == FALSE) stop ('gqm method only applys to precipitation, please set preci = T')\n biasFactor <- getBiasFactor_core_gqm(hindcast, obs, prThreshold, minHindcastPreci)\n }\n \n if (preci == TRUE) biasFactor$minHindcastPreci <- minHindcastPreci\n \n return(biasFactor)\n}\n\n\napplyBiasFactor_core <- function(frc, biasFactor, method, preci, prThreshold, scaleType,\n extrapolate, obs = NULL) {\n \n if (!any(!is.na(biasFactor))) {\n warning('In this cell, biasFactor is missing.No biasCorrection for this cell.')\n # here return NA or return the unprocessed frc, both are OK. 
But return NA is more\n # obvious for user.\n return(NA)\n }\n \n if (method == 'delta') {\n if (is.null(obs)) stop('This method needs obs input.')\n if (length(frc) != length(obs)) stop('This method needs frc data have the same length as obs data.')\n frc <- applyBiasFactor_core_delta(frc = frc, biasFactor = biasFactor, obs = obs)\n } else if (method == 'scaling') {\n frc <- applyBiasFactor_core_scaling(frc = frc, biasFactor = biasFactor, scaleType = scaleType)\n } else if (method == 'eqm') {\n if (is.null(obs)) stop('This method needs obs input.')\n if (preci == FALSE) {\n frc <- applyBiasFactor_core_eqm_nonPreci(frc = frc, biasFactor = biasFactor, extrapolate = extrapolate, \n obs = obs)\n } else {\n frc <- applyBiasFactor_core_eqm_preci(frc = frc, biasFactor = biasFactor, extrapolate = extrapolate, \n prThreshold = prThreshold, obs = obs)\n }\n } else if (method == 'gqm') {\n frc <- applyBiasFactor_core_gqm(frc = frc, biasFactor = biasFactor)\n }\n \n return(frc)\n}\n\n\ngetBiasFactor_core_delta <- function(hindcast) {\n biasFactor <- list()\n biasFactor$hindcastMean <- mean(hindcast, na.rm = TRUE)\n return(biasFactor)\n}\napplyBiasFactor_core_delta <- function(frc, biasFactor, obs) {\n hindcastMean <- biasFactor$hindcastMean\n frcMean <- mean(frc, na.rm = TRUE)\n return(obs - hindcastMean + frcMean)\n}\n\ngetBiasFactor_core_scaling <- function(hindcast, obs, scaleType) {\n biasFactor <- list()\n \n hindcastMean <- mean(hindcast, na.rm = TRUE)\n obsMean <- mean(obs, na.rm = TRUE)\n \n if (scaleType == 'multi') {\n biasFactor$scale <- obsMean / hindcastMean\n \n } else if (scaleType == 'add') {\n biasFactor$scale <- obsMean - hindcastMean\n }\n \n return(biasFactor)\n}\n\napplyBiasFactor_core_scaling <- function(frc, biasFactor, scaleType) {\n \n if (scaleType == 'multi') {\n frc <- frc * biasFactor$scale\n \n } else if (scaleType == 'add') {\n frc <- frc + biasFactor$scale\n }\n return(frc)\n}\n\ngetBiasFactor_core_eqm_nonPreci <- function(hindcast, obs, 
extrapolate) {\n \n biasFactor <- list()\n biasFactor$ecdfHindcast <- ecdf(hindcast)\n \n if (extrapolate == 'constant') {\n biasFactor$maxHindcast <- max(hindcast, na.rm = TRUE)\n biasFactor$minHindcast <- min(hindcast, na.rm = TRUE)\n biasFactor$higherIndex_dif <- biasFactor$maxHindcast - max(obs, na.rm = TRUE)\n biasFactor$lowerIndex_dif <- biasFactor$minHindcast - min(obs, na.rm = TRUE)\n }\n \n return(biasFactor)\n}\n\ngetBiasFactor_core_eqm_preci <- function(hindcast, obs, minHindcastPreci, extrapolate,\n prThreshold) {\n \n biasFactor <- list()\n biasFactor$ecdfHindcast <- ecdf(hindcast[hindcast > minHindcastPreci])\n \n if (extrapolate == 'constant') {\n biasFactor$maxHindcast <- max(hindcast, na.rm = TRUE)\n biasFactor$minHindcast <- min(hindcast, na.rm = TRUE)\n biasFactor$higherIndex_dif <- biasFactor$maxHindcast - max(obs, na.rm = TRUE)\n biasFactor$lowerIndex_dif <- biasFactor$minHindcast - min(obs, nna.rm = TRUE)\n }\n biasFactor$availableHindcastLength <- length(which(hindcast > minHindcastPreci))\n \n # drizzle parameter 1\n biasFactor$drizzleP1 <- min(hindcast[hindcast > minHindcastPreci], na.rm = TRUE)\n # biasFactor$prThreshold <- prThreshold\n return(biasFactor)\n}\n\napplyBiasFactor_core_eqm_nonPreci <- function(frc, biasFactor, extrapolate, obs) {\n ecdfHindcast <- biasFactor$ecdfHindcast\n \n if (extrapolate == 'constant') {\n higherIndex <- which(frc > biasFactor$maxHindcast)\n lowerIndex <- which(frc < biasFactor$minHindcast)\n \n extrapolateIndex <- c(higherIndex, lowerIndex)\n non_extrapolateIndex <- setdiff(1:length(frc), extrapolateIndex)\n \n # In case extrapolateIndex is of length zero, than extrapolate cannot be used afterwards\n # So use setdiff(1:length(sim), extrapolateIndex), if extrapolateIndex == 0, than it will\n # return 1:length(sim)\n \n if (length(higherIndex) > 0) {\n \n frc[higherIndex] <- frc[higherIndex] - biasFactor$higherIndex_dif\n }\n \n if (length(lowerIndex) > 0) {\n \n frc[lowerIndex] <- frc[lowerIndex] - 
biasFactor$lowerIndex_dif\n }\n \n frc[non_extrapolateIndex] <- quantile(obs, probs = ecdfHindcast(frc[non_extrapolateIndex]), \n na.rm = TRUE, type = 4)\n } else {\n frc <- quantile(obs, probs = ecdfHindcast(frc), na.rm = TRUE, type = 4)\n }\n return(frc)\n}\n\n#' @importFrom stats quantile\napplyBiasFactor_core_eqm_preci <- function(frc, biasFactor, extrapolate, prThreshold, obs) {\n \n # Most of time this condition seems useless because minHindcastPreci comes from hindcast, so there will be\n # always hindcast > minHindcastPreci exists.\n # Unless one condition that minHindcastPreci is the max in the hindcast, than on hindcast > minHindcastPreci\n if (biasFactor$availableHindcastLength > 0) {\n \n ecdfHindcast <- biasFactor$ecdfHindcast\n \n noRain <- which(frc <= biasFactor$minHindcastPreci & !is.na(frc))\n rain <- which(frc > biasFactor$minHindcastPreci & !is.na(frc))\n \n # drizzle is to see whether there are some precipitation between the min frc (over threshold) and \n # min hindcast (over threshold).\n drizzle <- which(frc > biasFactor$minHindcastPreci & frc <= biasFactor$drizzleP1 & !is.na(frc))\n \n if (length(rain) > 0) {\n ecdfFrc <- ecdf(frc[rain])\n \n if (extrapolate == 'constant') {\n \n # This higher and lower index mean the extrapolation part\n higherIndex <- which(frc[rain] > biasFactor$maxHindcast)\n lowerIndex <- which(frc[rain] < biasFactor$minHindcast)\n \n extrapolateIndex <- c(higherIndex, lowerIndex)\n non_extrapolateIndex <- setdiff(1:length(rain), extrapolateIndex)\n \n if (length(higherIndex) > 0) {\n frc[rain[higherIndex]] <- frc[higherIndex] - biasFactor$higherIndex_dif\n }\n \n if (length(lowerIndex) > 0) {\n frc[rain[lowerIndex]] <- frc[lowerIndex] - biasFactor$lowerIndex_dif\n }\n \n \n # Here the original function doesn't accout for the situation that extraploateIndex is 0\n # if it is 0, rain[-extraploateIndex] would be nothing\n \n # Above has been solved by using setdiff.\n frc[rain[non_extrapolateIndex]] <- 
quantile(obs[which(obs > prThreshold & !is.na(obs))], \n probs = ecdfHindcast(frc[rain[non_extrapolateIndex]]), \n na.rm = TRUE, type = 4)\n \n } else {\n \n frc[rain] <- quantile(obs[which(obs > prThreshold & !is.na(obs))], \n probs = ecdfHindcast(frc[rain]), na.rm = TRUE, type = 4)\n }\n }\n if (length(drizzle) > 0){\n \n # drizzle part is a seperate part. it use the ecdf of frc (larger than minHindcastPreci) to \n # biascorrect the original drizzle part \n frc[drizzle] <- quantile(frc[which(frc > biasFactor$drizzleP1 & !is.na(frc))], \n probs = ecdfFrc(frc[drizzle]), na.rm = TRUE, \n type = 4)\n }\n \n frc[noRain] <- 0\n \n } else {\n # in this condition minHindcastPreci is the max of hindcast, so all hindcast <= minHindcastPreci\n # And frc distribution is used then.\n noRain <- which(frc <= biasFactor$minHindcastPreci & !is.na(frc))\n rain <- which(frc > biasFactor$minHindcastPreci & !is.na(frc))\n \n if (length(rain) > 0) {\n ecdfFrc <- ecdf(frc[rain])\n frc[rain] <- quantile(obs[which(obs > prThreshold & !is.na(obs))], probs = ecdfFrc(frc[rain]), \n na.rm = TRUE, type = 4)\n }\n frc[noRain]<-0\n }\n return(frc)\n}\n\n#' @importFrom MASS fitdistr\ngetBiasFactor_core_gqm <- function(hindcast, obs, prThreshold, minHindcastPreci) {\n if (any(obs > prThreshold)) {\n biasFactor <- list()\n ind <- which(obs > prThreshold & !is.na(obs))\n obsGamma <- fitdistr(obs[ind],\"gamma\")\n biasFactor$obsShape <- obsGamma$estimate[1]\n biasFactor$obsRate <- obsGamma$estimate[2]\n \n ind <- which(hindcast > 0 & !is.na(hindcast))\n hindcastGamma <- fitdistr(hindcast[ind],\"gamma\")\n biasFactor$hindcastShape <- hindcastGamma$estimate[1]\n biasFactor$hindcastRate <- hindcastGamma$estimate[2]\n biasFactor$minHindcastPreci <- minHindcastPreci\n \n } else {\n warning('All the observations of this cell(station) are lower than the threshold, \n no biasFactor returned.')\n biasFactor <- NA\n }\n return(biasFactor)\n}\n\n#' @importFrom stats pgamma qgamma\napplyBiasFactor_core_gqm <- 
function(frc, biasFactor) {\n \n rain <- which(frc > biasFactor$minHindcastPreci & !is.na(frc))\n noRain <- which(frc <= biasFactor$minHindcastPreci & !is.na(frc))\n \n probF <- pgamma(frc[rain], biasFactor$hindcastShape, rate = biasFactor$hindcastRate)\n frc[rain] <- qgamma(probF, biasFactor$obsShape, rate = biasFactor$obsRate)\n frc[noRain] <- 0\n \n return(frc)\n}", - "created" : 1449959888674.000, - "dirty" : false, - "encoding" : "ASCII", - "folds" : "", - "hash" : "2627193633", - "id" : "BDA65B7E", - "lastKnownWriteTime" : 1449960337, - "path" : "E:/1/R/hyfo/R/multi-biasCorrect(generic).R", - "project_path" : "R/multi-biasCorrect(generic).R", - "properties" : { - }, - "relative_order" : 14, - "source_on_save" : false, - "type" : "r_source" -} \ No newline at end of file diff --git a/.Rproj.user/D53FD3E6/sdb/per/t/E1BDCB97 b/.Rproj.user/D53FD3E6/sdb/per/t/E1BDCB97 deleted file mode 100644 index f2fcb1a..0000000 --- a/.Rproj.user/D53FD3E6/sdb/per/t/E1BDCB97 +++ /dev/null @@ -1,17 +0,0 @@ -{ - "contents" : "#' Collect data from csv for Anarbe case.\n#' \n#' Collect data from the gauging stations in spain, catchement Anarbe\n#' \n#' @param folderName A string showing the path of the folder holding different csv files.\n#' @param output A boolean showing whether the output is given, default is T.\n#' @return The collected data from different csv files.\n#' @examples\n#' \n#' #use internal data as an example.\n#' file <- system.file(\"extdata\", \"1999.csv\", package = \"hyfo\")\n#' folder <- strsplit(file, '1999')[[1]][1]\n#' a <- collectData_csv_anarbe(folder)\n#' \n#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/\n#' \n#' @references \n#' \n#' \\itemize{\n#' \\item http://meteo.navarra.es/estaciones/mapadeestaciones.cfm\n#' \\item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for\n#' Statistical Computing, Vienna, Austria. 
URL http://www.R-project.org/.\n#' }\n#' \n#' @source http://meteo.navarra.es/estaciones/mapadeestaciones.cfm\n#' @export\n#' @importFrom utils tail\ncollectData_csv_anarbe <- function(folderName, output = TRUE){\n \n fileNames <- list.files(folderName, pattern='*.csv', full.names = TRUE)\n data <- lapply(fileNames, readColumn_csv_anarbe)\n data <- do.call('rbind', data)\n data <- data[, 1:2]\n data[, 1] <- as.Date(data[, 1], format = '%d/%m/%Y')\n \n #newFileName <- file.choose(new = T)\n #write.table(data_new,file=newFileName,row.names = F, col.names = F,sep=',')\n a <- unlist(strsplit(folderName, '\\\\\\\\|/'))\n tarName <- tail(a, 2)[1]\n colnames(data) <- c('Date', tarName)\n \n if (output) return(data)\n}\n\n\nreadColumn_csv_anarbe <- function(fileName){\n data <- read.csv(fileName, skip = 4)\n endIndex <- which(data == '', arr.ind = TRUE)[1]-1\n \n data <- data[1:endIndex, ]\n \n if (!is.null(levels(data[, 2]))) {\n data[, 2] <- as.numeric(levels((data[, 2])))[data[, 2]]\n }\n \n colnames(data) <- c('Date', 'target')\n message(fileName)\n \n return(data)\n}\n\n\n\n#' Collect data from different excel files\n#' \n#' @param folderName A string showing the folder path.\n#' @param keyword A string showing the extracted column, e.g., waterLevel, waterBalance.\n#' @param output A boolean showing whether the output is given.\n#' @return The collected data from different excel files.\n#' @export\n#' @references \n#' \n#' \\itemize{\n#' \\item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for\n#' Statistical Computing, Vienna, Austria. 
URL http://www.R-project.org/.\n#' }\n# @importFrom utils write.table\ncollectData_excel_anarbe <- function(folderName, keyword = NULL, output = TRUE){\n \n message('In order to make \"hyfo\" easier to be installed, this part is commented,\n check original R file in your computer or go to \n https://github.com/Yuanchao-Xu/hyfo/blob/master/R/collectData_excel.R\n for ideas.')\n \n \n # newFileName <- file.choose(new = TRUE)\n # message ('new file should be located a different location than the excel folder, \n # in order to avoid error.\n # At least 2 excels should be in the folder\\n')\n # \n # message ('this function only applies to strange spain dem operation record file, and this strange file changes\n # its format in the middle of the record. For other applications, some tiny changes needs to be made.')\n # if (is.null(keyword)) stop('key word is needed, e.g.\"waterLevel\".')\n # \n # fileNames <- list.files(folderName, pattern = '*.xls', full.names = TRUE)\n # data <- lapply(fileNames, FUN = readColumn_excel_anarbe, keyword = keyword)\n # checkBind(data, 'rbind')\n # data <- do.call('rbind', data)\n # \n # data_new <- data.frame(data)\n # \n # data_new <- data_new[order(data_new[, 1]), ]\n # \n # \n # startDate <- data_new[1, 1]\n # endDate <- data_new[length(data_new[, 1]), 1]\n # \n # Date <- as.factor(seq(startDate, endDate, by = 1))\n # \n # if (length(Date) != length(data_new[, 1])) stop('check if the excel files are continuous')\n # \n # colnames(data_new) <- c('Date', keyword)\n # \n # write.table(data_new, file = newFileName,\n # row.names = FALSE, col.names = TRUE, sep = ',')\n # if(output == TRUE) return(data_new)\n}\n\n# \n# @importFrom xlsx read.xlsx\n# readTable_excel_anarbe <- function(fileName){\n# \n# index <- tail(strsplit(fileName, '\\\\.|\\\\ ')[[1]], 3)\n# raw_year <- index[1]\n# raw_mon <- index[2]\n# \n# raw <- read.xlsx(fileName, sheetName='A')\n# startRow <- which(raw == 'COTA', arr.ind = TRUE)[1]+4\n# startCol <- which(raw == 
'COTA',arr.ind = TRUE)[2]-1\n# stopRow <- which(raw =='TOTAL',arr.ind = TRUE)[1]-1\n# stopCol1 <- startCol + 17\n# stopCol2 <- which(raw == 'SUPERFICIE', arr.ind = TRUE)[2]\n# data <- cbind(raw[startRow:stopRow,startCol:stopCol1], raw[startRow:stopRow,stopCol2])\n# \n# \n# yearIndex <- rep(raw_year, stopRow-startRow+1)\n# monIndex <- rep(raw_mon, stopRow-startRow+1)\n# \n# data <- cbind(yearIndex, monIndex, data)\n# return(data)\n# }\n# # \n# @importFrom utils tail\n# readColumn_excel_anarbe <- function(fileName, keyword = NULL){\n# \n# index <- tail(strsplit(fileName, '\\\\.|\\\\ ')[[1]],3)\n# year <- as.numeric(index[1])\n# mon <- as.numeric(index[2])\n# \n# if (year == 99) {\n# year = year + 1900\n# } else year = year + 2000\n# \n# word = c('COTA', 'Cota\\n(m)', 'TOTAL', ' TOTAL')\n# \n# if (keyword == 'waterLevel') {\n# searchWord <- c('COTA', 'Cota\\n(m)')\n# } else if (keyword == 'discharge_ERE') {\n# searchWord <- c('AF.ERE-', 'Caudal\\n(m??/s)')\n# } else if (keyword == 'waterBalance') {\n# searchWord <- c('INCREMENTO', 'al Canal Bajo', 'AFORO',\n# 'Variaci??n\\nvolumen embalsado')\n# } else if (keyword == 'surfaceArea') {\n# searchWord <- c('SUPERFICIE', 'SUPERFICIE')\n# } else if (keyword == 'volume') {\n# searchWord <- c('EMBALSADO', 'Volumen\\n(m????)')\n# }\n# \n# \n# if (year == 1999 | year < 2009 | (year == 2009 & mon < 5)) {\n# raw <- xlsx::read.xlsx(fileName, sheetName = 'A')\n# startIndex <- which(raw == word[1], arr.ind = TRUE)\n# endIndex <- which(raw == word[3], arr.ind = TRUE)\n# startRow <- startIndex[1]+4\n# endRow <- endIndex[1]-1\n# \n# dayCol <- endIndex[2]\n# day <- raw[startRow:endRow, dayCol]\n# \n# targetCol <- which(raw == searchWord[1], arr.ind = TRUE)[2]\n# \n# if (is.na(targetCol)) stop(sprintf('capture nothing in %s', fileName))\n# \n# if (keyword == 'waterBalance') {\n# targetStart <- targetCol\n# targetEnd <- which(raw == searchWord[3], arr.ind = TRUE)[2]\n# a <- raw[startRow:endRow, targetStart:targetEnd]\n# a <- sapply(a, 
function(x) as.numeric(levels(x)[x]))\n# \n# if (year == 1999 & mon == 4) {\n# \n# target <- data.frame(a[, 2] * 86.4, a[, 5] * 86.4, rep(NA, dim(a)[1]), a[, 6] * 86.4,\n# a[, 4] * 86.4, a[, 11] * 86.4, a[, 3], a[, 7], rep(NA, dim(a)[1]), a[, 1])\n# } else {\n# target <- data.frame(a[, 2] * 86.4, a[, 5] * 86.4, a[, 6] * 86.4, a[, 7] * 86.4, \n# a[, 4] * 86.4, a[, 12] * 86.4, a[, 3], a[, 8], rep(NA, dim(a)[1]), a[, 1])\n# } \n# \n# } else {\n# target <- raw[startRow:endRow, targetCol]\n# if (keyword == 'discharge_ERE') target <- as.numeric(levels(target))[target]/1000\n# }\n# \n# } else {\n# raw <- read.xlsx(fileName,sheetName = 'parte del embalse')\n# startIndex <- which(raw == word[2], arr.ind = TRUE)\n# endIndex <- which(raw == word[4], arr.ind = TRUE)\n# startRow <- startIndex[1]+1\n# endRow <- endIndex[1]-2\n# \n# dayCol <- endIndex[2]\n# day <- raw[startRow:endRow, dayCol]\n# targetCol <- which(raw == searchWord[2], arr.ind=TRUE)[2]\n# if (is.na(targetCol)) stop(sprintf('capture nothing in %s', fileName))\n# \n# if (keyword == 'waterBalance') {\n# targetStart <- targetCol\n# targetEnd <- which(raw == searchWord[4], arr.ind=TRUE)[2]\n# target <- raw[startRow:endRow, targetStart:targetEnd]\n# \n# } else {\n# target <- raw[startRow:endRow, targetCol]\n# }\n# \n# }\n# \n# \n# startDate <- as.Date(paste(year, mon, day[1], sep = '-'))\n# endDate <- as.Date(paste(year, mon, tail(day,1), sep = '-'))\n# \n# Date <- seq(startDate, endDate, 1)\n# output <- data.frame(Date, as.vector(target))\n# colnames(output) <- c('Date', seq(1, dim(output)[2] - 1))\n# message(fileName) \n# return(output)\n# \n# }\n# \n\n\n\n\n\n#' collect data from different txt.\n#' \n#' @param folderName A string showing the folder path.\n#' @param output A boolean showing whether the result is given.\n#' @param rangeWord A list containing the keyword and the shift. 
\n#' defaut is set to be used in spain gauging station.\n#' @examples\n#' \n#' #use internal data as an example.\n#' \n#' \\dontrun{\n#' file <- system.file(\"extdata\", \"1999.csv\", package = \"hyfo\")\n#' folder <- strsplit(file, '1999')[[1]][1]\n#' a <- collectData_txt_anarbe(folder)\n#' }\n#'\n#' \n#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/\n#' \n#' @references \n#' \n#' \\itemize{\n#' \\item http://www4.gipuzkoa.net/oohh/web/esp/02.asp\n#' \\item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for\n#' Statistical Computing, Vienna, Austria. URL http://www.R-project.org/.\n#' }\n#' \n#' \n#' @source http://www4.gipuzkoa.net/oohh/web/esp/02.asp\n#' @return The collected data from different txt files.\n#' @export\n#' @importFrom utils tail\ncollectData_txt_anarbe <- function(folderName, output = TRUE, rangeWord = c('Ene ', -1, \n 'Total ', -6)){\n #All the code should be ASCII encode, so there should be no strange symbol.\n if (is.null(rangeWord)) {\n stop('rangeWord consists of 4 elements:\n 1. start word which program can recognise.\n 2. shift1, the shift needs to be made. E.g. start word is in line 7, and program\n should read file from line 9, then shift is 9-7 = 2.\n 3. end word, as start word\n 4. shift2, same as shift1, sometimes can be negative\n \n E.g. 
rangeWord=c(\\\"aaa\\\",2,\\\"bbb\\\",-2)\n if no rangeWord, just input c(NULL,NULL,NULL,NULL)')\n \n }\n \n \n fileNames <- list.files(folderName, pattern = '*.TXT', full.names = TRUE)\n \n data <- lapply(fileNames, FUN = readColumn_txt_anarbe, rangeWord = rangeWord)\n \n data <- do.call('rbind', data)\n \n a <- unlist(strsplit(folderName, '\\\\\\\\|/'))\n tarName <- tail(a, 2)[1]\n colnames(data) <- c('Date', tarName)\n \n #newFileName <- file.choose(new = T)\n message('new file should be located a different location than the excel folder,\n in order to avoid error.\n At least 2 excels should be in the folder')\n \n #write.table(data_new,file=newFileName,row.names = F, col.names = F,sep=',')\n \n \n if (output == TRUE) return(data)\n \n} \n\n\n\nanarbe_txt <- function(dataset, x1, x2){\n \n data <- as.matrix(dataset[x1:x2, 2:13])\n startYear <- data[1, 6]\n \n data <- data[5:35, ]\n \n date <- which(data != ' ', arr.ind = TRUE)\n startDate <- date[1, ]\n \n endDate <- date[length(date[, 1]), ]\n \n startDate <- as.Date(paste(startYear, startDate[2], startDate[1], sep = '-'))\n endDate <- as.Date(paste(startYear, endDate[2], endDate[1], sep = '-'))\n \n Date <- as.factor(seq(startDate, endDate, 1))\n \n dim(data) <- c(length(data), 1)\n \n data <- as.numeric(data[which(data != ' '), ])\n \n if (length(data) != length(Date)) {\n stop('check original txt file. for missing value, the symbol is \"--\", check\n if this symbol is missing somewhere')\n }\n \n output <- data.frame(Date = Date, target = data)\n \n return(output)\n }\n\n\n#' @references \n#' \n#' \\itemize{\n#' \\item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for\n#' Statistical Computing, Vienna, Austria. 
URL http://www.R-project.org/.\n#' }\n#' \n#' @importFrom utils read.fwf\nreadColumn_txt_anarbe <- function(fileName, keyword = NULL, rangeWord = NULL){\n \n a <- read.fwf(fileName, widths = rep(10,13))#read file with fixed width\n \n startRow <- which(a == rangeWord[1], arr.ind = TRUE)[, 1]\n startRow <- startRow + as.numeric(rangeWord[2])\n \n endRow <- which(a == rangeWord[3], arr.ind = TRUE)[, 1]\n endRow <- endRow + as.numeric(rangeWord[4])\n \n data <- mapply(FUN = function(x1, x2) anarbe_txt(dataset = a, x1, x2), startRow, endRow)\n \n data_new <- data.frame(Data = unlist(data[1, ]), target = unlist(data[2, ]))\n message(fileName)\n return(data_new)\n}\n", - "created" : 1449334884164.000, - "dirty" : false, - "encoding" : "ASCII", - "folds" : "", - "hash" : "897129278", - "id" : "E1BDCB97", - "lastKnownWriteTime" : 1446510987, - "path" : "E:/1/R/hyfo/R/case_anarbe.R", - "project_path" : "R/case_anarbe.R", - "properties" : { - }, - "relative_order" : 7, - "source_on_save" : false, - "type" : "r_source" -} \ No newline at end of file diff --git a/.Rproj.user/D53FD3E6/sdb/per/t/FCC66B37 b/.Rproj.user/D53FD3E6/sdb/per/t/FCC66B37 deleted file mode 100644 index ce7dc02..0000000 --- a/.Rproj.user/D53FD3E6/sdb/per/t/FCC66B37 +++ /dev/null @@ -1,18 +0,0 @@ -{ - "contents" : "#' get mean rainfall bar plot of the input dataset or time series.\n#' \n#' get mean rainfall bar plot of the input dataset or time series.\n#' \n#' \n#' @param data A list containing different information, should be the result of reading netcdf file using\n#' \\code{\\link{loadNcdf}}, or a time series, with first column the Date, second the value.\n#' Time series can be an ENSEMBLE containning different members. Than the mean value will be given and the range will be given.\n#' @param method A string showing the calculating method of the input time series. More information\n#' please refer to the details.\n#' @param cell A vector containing the locaton of the cell, e.g. 
c(2, 3), default is \"mean\", representing\n#' the spatially averaged value. Check details for more information.\n#' @param output A string showing the type of the output, if \\code{output = 'ggplot'}, the returned \n#' data can be used in ggplot and \\code{getPreciBar_comb()}; if \\code{output = 'plot'}, the returned data is the plot containing all \n#' layers' information, and can be plot directly or used in grid.arrange; if not set, the data\n#' will be returned.\n#' @param name If \\code{output = 'ggplot'}, name has to be assigned to your output, in order to differentiate\n#' different outputs in the later multiplot using \\code{getSpatialMap_comb}.\n#' @param plotRange A boolean showing whether the range will be plotted.\n#' @param member A number showing which member is selected to get, if the dataset has a \"member\" dimension. Default\n#' is NULL, if no member assigned, and there is a \"member\" in dimensions, the mean value of the members will be\n#' taken.\n#' @param omitNA A boolean showing whether the missing value is omitted.\n#' @param info A boolean showing whether the information of the map, e.g., max, mean ..., default is FALSE.\n#' @param ... \\code{title, x, y} showing the title and x and y axis of the plot. e.g. \\code{title = 'aaa'}\n#' @details\n#' There are following methods to be selected, \n#' \"annual\": annual rainfall of each year is plotted. \n#' \"winter\", \"spring\", \"autumn\", \"summer\": seasonal rainfall of each year is plotted.\n#' Month(number 1 to 12): month rainfall of each year is plotted, e.g. march rainfall of each year.\n#' \"meanMonthly\": the mean monthly rainfall of each month over the whole period.\n#' \n#' #Since \"winter\" is a crossing year, 12, 1, 2, 12 is in former year, and 1, 2 are in latter year.\n#' #so winter belongs to the latter year.\n#' \n#' \n#' \\code{cell} representing the location of the cell, NOTE: this location means the index of the cell,\n#' IT IS NOT THE LONGITUDE AND LATITUDE. 
e.g., \\code{cell = c(2, 3)}, the program will take the 2nd longitude\n#' and 3rd latitude, by the increasing order. Longitude comes first.\n#' \n#' \n#' It is a generic function, if in your case you need to debug, please see \\code{?debug()} \n#' for how to debug S4 method.\n#' \n#' @examples\n#' #gridData provided by package is the result of \\code{loadNcdf()}\n#' data(tgridData)\n#' b1 <- getPreciBar(tgridData, method = 'annual')\n#' b2 <- getPreciBar(tgridData, method = 'meanMonthly')\n#' \n#' data(testdl)\n#' TS <- testdl[[1]]\n#' a <- getPreciBar(TS, method = 'spring')\n#' # if info = T, the information will be given at the bottom.\n#' a <- getPreciBar(TS, method = 'spring', info = TRUE)\n#' \n#' \n#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/\n#' \n#' @references \n#' \n#' \n#' \\itemize{\n#' \\item Hadley Wickham (2007). Reshaping Data with the reshape Package. Journal of Statistical Software,\n#' 21(12), 1-20. URL http://www.jstatsoft.org/v21/i12/.\n#' \\item H. Wickham. ggplot2: elegant graphics for data analysis. Springer New York, 2009.\n#' \\item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for\n#' Statistical Computing, Vienna, Austria. URL http://www.R-project.org/.\n#' }\n#'\n#' \n#' @return The calculated mean value of the input time series and the plot of the result.\n#' @export\nsetGeneric('getPreciBar', function(data, method, cell = 'mean', output = 'data', name = NULL, \n plotRange = TRUE, member = NULL, omitNA = TRUE, info = FALSE,\n ...) {\n standardGeneric('getPreciBar')\n})\n\n#' @describeIn getPreciBar\nsetMethod('getPreciBar', signature('list'), \n function(data, method, cell, output, name, plotRange, member, omitNA, info, ...) 
{\n TS <- getPreciBar.list(data, cell, member)\n # for hyfo file, in order to process the data, year and month index need to be provided.\n startTime <- as.POSIXlt(data$Dates$start, tz = 'GMT')\n yearIndex <- startTime$year + 1900\n monthIndex <- startTime$mon + 1\n \n result <- getPreciBar.plot(TS, method, output, name, plotRange, omitNA, info, yearIndex,\n monthIndex, ...)\n return(result)\n})\n\n#' @describeIn getPreciBar\nsetMethod('getPreciBar', signature('data.frame'), \n function(data, method, cell, output, name, plotRange, member, omitNA, info, ...) {\n Date <- as.POSIXlt(TS[, 1])\n yearIndex <- Date$year + 1900\n monthIndex <- Date$mon + 1\n TS <- getPreciBar.TS(data)\n result <- getPreciBar.plot(TS, method, output, name, plotRange, omitNA, info, \n yearIndex, monthIndex, ...)\n return(result)\n})\n\n\ngetPreciBar.list <- function(dataset, cell, member) {\n #check input dataset\n checkHyfo(dataset)\n \n data <- dataset$Data\n \n # Dimension needs to be arranged. Make sure first and second dimension is lat and lon.\n data <- adjustDim(data, ref = c('lon', 'lat', 'time'))\n \n # Because in the following part, only 3 dimensions are allowed, so data has to be processed.\n if (is.null(member) & any(attributes(data)$dimensions == 'member')) {\n dimIndex3 <- which(attributes(data)$dimensions != 'member')\n data <- apply(data, MARGIN = dimIndex3, FUN = mean, na.rm = TRUE)\n } else if (!is.null(member) & any(attributes(data)$dimensions == 'member')) {\n dimIndex3 <- which(attributes(data)$dimensions == 'member')\n data <- chooseDim(data, dimIndex3, member, drop = TRUE)\n } else if (!is.null(member) & !any(attributes(data)$dimensions == 'member')){\n stop('There is no member part in the dataset, but you choose one, check the input\n dataset or change your arguments.')\n }\n \n if (identical(cell, 'mean')) {\n TS <- apply(data, MARGIN = 3, FUN = mean, na.rm = TRUE) \n } else {\n TS <- data[cell[1], cell[2], ]\n }\n \n return(TS)\n}\n\n\n#' @importFrom reshape2 
melt\ngetPreciBar.TS <- function(TS) {\n \n# Date <- as.POSIXlt(TS[, 1])\n# yearIndex <- Date$year + 1900\n# monthIndex <- Date$mon + 1\n n <- ncol(TS) - 1\n \n if ( n == 1) {\n TS <- TS[, 2]\n } else {\n \n TS <- TS[, -1]\n # month index should be repeat, but years cannot.\n# yearIndex <- sapply(1:n, function(x) yearIndex + x - 1)\n# dim(yearIndex) <- c(n * nrow(yearIndex), 1)\n \n# monthIndex <- rep(monthIndex, n)\n TS <- melt(TS)[, 2]\n \n }\n return(TS)\n}\n\n\n#' @importFrom stats median\n#' @importFrom reshape2 melt\n#' @import ggplot2\ngetPreciBar.plot <- function(TS, method, output, name, plotRange, omitNA, info, \n yearIndex = NULL, monthIndex = NULL, ...) {\n \n \n if (method == 'meanMonthly') {\n \n monthlyPreci <- tapply(TS, INDEX = list(yearIndex, monthIndex), FUN = sum, na.rm = omitNA)\n meanMonthlyPreci <- apply(monthlyPreci, MARGIN = 2, FUN = mean, na.rm = TRUE)\n \n \n title <- 'Mean Monthly Precipitation'\n xlab <- 'Month'\n \n plotPreci <- data.frame(Index = month.abb[as.numeric(colnames(monthlyPreci))], \n Preci = meanMonthlyPreci)\n \n # Here factor has to be reassigned, to keep the original order, or it will be reordered.\n plotPreci$Index <- factor(plotPreci$Index, levels = plotPreci$Index, ordered = TRUE)\n \n if (plotRange) {\n maxValue <- apply(monthlyPreci, MARGIN = 2, FUN = max, na.rm = TRUE)\n minValue <- apply(monthlyPreci, MARGIN = 2, FUN = min, na.rm = TRUE)\n \n plotPreci$maxValue <- maxValue\n plotPreci$minValue <- minValue\n \n ylim <- c(0,max(maxValue, na.rm = TRUE) * 1.1)\n \n } else {\n ylim <- c(0,max(meanMonthlyPreci, na.rm = TRUE) * 1.1)\n }\n \n \n } else if (method == 'annual') { \n \n if (length(unique(monthIndex)) < 12) {\n warning ('There are less than 12 months in a year, the results may be inaccurate.')\n }\n \n annualPreci <- tapply(TS, INDEX = yearIndex, FUN = sum, na.rm = TRUE)\n title <- 'Annual Precipitation'\n xlab <- 'Year'\n plotName <- names(annualPreci)\n \n plotPreci <- data.frame(Index = 
names(annualPreci), Preci = annualPreci)\n plotPreci$Index <- factor(plotPreci$Index, levels = plotPreci$Index, ordered = TRUE)\n \n ylim <- c(0, max(annualPreci, na.rm = TRUE) * 1.1)\n \n } else if (is.numeric(method)) {\n month <- method\n monExisting <- length(which(unique(monthIndex) == month))\n if (monExisting == 0) stop(\"Your input month doesn't exist in the dataset.\")\n \n monthlyPreci <- getMeanPreci(TS, method = month, yearIndex = yearIndex,\n monthIndex = monthIndex, fullResults = TRUE, omitNA = omitNA)\n # If monthlyPreci length is 1, names need to be added.\n if (length(monthlyPreci) == 1) names(monthlyPreci) <- unique(yearIndex)\n plotPreci <- data.frame(Index = names(monthlyPreci), Preci = monthlyPreci)\n plotPreci$Index <- factor(plotPreci$Index, levels = plotPreci$Index, ordered = TRUE)\n \n title <- paste(month.abb[month], 'Precipitation over Whole Period', sep = ' ')\n xlab <- 'Year'\n ylim <- c(0, max(monthlyPreci, na.rm = TRUE) * 1.1)\n \n } else if (method == 'spring') { \n \n wm <- match(c(3, 4, 5), unique(monthIndex))\n if (length(which(!is.na(wm))) < 3) {\n stop('Spring has less than 3 months, check data and try to calculate every month\n seperately or choose another season.')\n }\n \n seasonalPreci <- getMeanPreci(TS, method = 'spring', yearIndex = yearIndex,\n monthIndex = monthIndex, fullResults = TRUE, omitNA = omitNA)\n plotPreci <- data.frame(Index = names(seasonalPreci), Preci = seasonalPreci)\n plotPreci$Index <- factor(plotPreci$Index, levels = plotPreci$Index, ordered = TRUE)\n \n title <- paste('Spring', 'Precipitation over Whole Period', sep = ' ')\n xlab <- 'Year'\n ylim <- c(0, max(seasonalPreci, na.rm = TRUE) * 1.1)\n \n \n } else if (method == 'summer') {\n \n wm <- match(c(6, 7, 8), unique(monthIndex))\n if (length(which(!is.na(wm))) < 3) {\n stop('Summer has less than 3 months, check data and try to calculate every month\n seperately or choose another season.')\n }\n \n seasonalPreci <- getMeanPreci(TS, method = 
'summer', yearIndex = yearIndex,\n monthIndex = monthIndex, fullResults = TRUE, omitNA = omitNA)\n plotPreci <- data.frame(Index = names(seasonalPreci), Preci = seasonalPreci)\n plotPreci$Index <- factor(plotPreci$Index, levels = plotPreci$Index, ordered = TRUE)\n \n title <- paste('Summer', 'Precipitation over Whole Period', sep = ' ')\n xlab <- 'Year'\n ylim <- c(0, max(seasonalPreci, na.rm = TRUE) * 1.1)\n \n \n } else if (method == 'autumn') {\n wm <- match(c(9, 10, 11), unique(monthIndex))\n if (length(which(!is.na(wm))) < 3) {\n stop('Autumn has less than 3 months, check data and try to calculate every month\n seperately or choose another season.')\n }\n \n seasonalPreci <- getMeanPreci(TS, method = 'autumn', yearIndex = yearIndex,\n monthIndex = monthIndex, fullResults = TRUE, omitNA = omitNA)\n plotPreci <- data.frame(Index = names(seasonalPreci), Preci = seasonalPreci)\n plotPreci$Index <- factor(plotPreci$Index, levels = plotPreci$Index, ordered = TRUE)\n \n title <- paste('Autumn', 'Precipitation over Whole Period', sep = ' ')\n xlab <- 'Year'\n ylim <- c(0, max(seasonalPreci, na.rm = TRUE) * 1.1)\n \n } else if (method == 'winter') {\n wm <- match(c(12, 1, 2), unique(monthIndex))\n if (length(which(!is.na(wm))) < 3) {\n stop('Winter has less than 3 months, check data and try to calculate every month\n seperately or choose another season.')\n }\n \n seasonalPreci <- getMeanPreci(TS, method = 'winter', yearIndex = yearIndex,\n monthIndex = monthIndex, fullResults = TRUE, omitNA = omitNA)\n plotPreci <- data.frame(Index = names(seasonalPreci), Preci = seasonalPreci)\n plotPreci$Index <- factor(plotPreci$Index, levels = plotPreci$Index, ordered = TRUE)\n \n title <- paste('Winter', 'Precipitation over Whole Period', sep = ' ')\n xlab <- 'Year'\n ylim <- c(0, max(seasonalPreci, na.rm = TRUE) * 1.1)\n \n } else {\n stop(paste('No method called \"', method, '\", check help for information'))\n }\n \n \n xlim <- c(0, length(rownames(plotPreci))) \n \n if (info 
== TRUE) {\n meanValue <- round(mean(plotPreci$Preci, na.rm = TRUE), 2)\n medianValue <- round(median(plotPreci$Preci,na.rm = TRUE), 2)\n plotMean <- paste('Mean', ' = ', meanValue)\n plotMedian <- paste('Median', ' = ', medianValue)\n \n plotMax <- round(max(plotPreci$Preci, na.rm = TRUE), 2)\n plotMin <- round(min(plotPreci$Preci, na.rm = TRUE), 2)\n word <- paste('\\n\\n', paste(' Max', '=', plotMax), ',', paste('Min', '=', plotMin), ',',\n plotMean, ',', plotMedian)\n } else word <- NULL\n \n \n xlab <- paste(xlab, word)\n \n theme_set(theme_bw())\n \n mainLayer <- with(plotPreci, {\n ggplot(plotPreci) +\n geom_bar(aes(x = Index, y = Preci), stat = 'identity', colour = 'black', fill = 'cyan2', width = rel(.4)) +\n xlab(xlab) +\n ylab('Precipitation (mm)') +\n ggtitle(title) +\n labs(empty = NULL, ...) +#in order to pass \"...\", arguments shouldn't be empty.\n theme(plot.title = element_text(size = rel(1.6), face = 'bold'),\n axis.title.x = element_text(size = rel(1.6)),\n axis.title.y = element_text(size = rel(1.6)),\n axis.text.x = element_text(angle = 90, hjust = 1, size = rel(1.9)),\n axis.text.y = element_text(size = rel(1.9)))\n # geom_text(x = min(xlim) + 0.95 * (max(xlim) - min(xlim)), y = min(ylim) + 0.15 * (max(ylim) - min(ylim)),\n # label = word)+\n # geom_hline(yintercept = meanValue) +\n # geom_text(x = min(xlim) + 0.3 * (max(xlim) - min(xlim)), y = meanValue + 3, vjust = 0, label = 'mean') +\n # geom_hline(yintercept = medianValue, colour = 'red') +\n # geom_text(x = min(xlim) + 0.6 * (max(xlim) - min(xlim)), y = medianValue + 3, vjust = 0,\n # label = 'median', colour = 'red')\n })\n \n \n if (plotRange) {\n if (is.null(plotPreci$maxValue)) {\n message('There is no plotRange for this method')\n print(mainLayer)\n } else {\n rangeLayer <- with(plotPreci, {\n geom_errorbar(aes(x = Index, ymax = maxValue, ymin = minValue), width = rel(0.3))\n }) \n print(mainLayer + rangeLayer)\n }\n \n } else {\n print(mainLayer)\n } \n \n if (output == 'plot') 
{\n return(mainLayer)\n } else if (output == 'ggplot') {\n if (is.null(name)) stop('\"name\" argument not found, \n If you choose \"ggplot\" as output, please assign a name.')\n plotPreci$Name <- rep(name, dim(plotPreci)[1])\n return(plotPreci)\n } else {\n return(plotPreci)\n }\n}\n\n\n\n\n\n\n\n#' Combine bars together\n#' @param ... different barplots generated by \\code{getPreciBar(, output = 'ggplot')}, refer to details.\n#' @details\n#' ..., representing different ouput generated by \\code{getPreciBar(, output = 'ggplot')}, they \n#' have to be of the same type, e.g., \n#' 1. Jan precipitation of different years, Feb precipitation of different years, and... \n#' They are both monthly precipitation, and they share x axis.\n#' \n#' 2. Mean monthly precipitation of different dataset. e.g., long term mean monthly precipitation\n#' and short term mean monthly precipitation. They are both mean monthly precipitation.\n#' \n#' @param nrow A number showing the number of rows.\n#' @param list If input is a list containing different ggplot data, use l\\code{list = inputlist}.\n#' NOTE: yOU HAVE TO PUT A \\code{list = }, before your list.\n#' @param x A string of x axis name.\n#' @param y A string of y axis name.\n#' @param title A string of the title.\n#' @param output A boolean, if chosen TRUE, the output will be given.\n#' @return A combined barplot.\n#' @examples\n#' \n#' data(tgridData)# the result of \\code{\\link{loadNcdf}}\n#' #output type of getPreciBar() has to be 'ggplot'.\n#' b1 <- getPreciBar(tgridData, method = 2, output = 'ggplot', name = 'b1')\n#' b2 <- getPreciBar(tgridData, method = 3, output = 'ggplot', name = 'b2')\n#' \n#' getPreciBar_comb(b1, b2)\n#' \n#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/\n#' \n#' @export\n#' @import ggplot2\n#' @references \n#' \n#' \\itemize{\n#' \\item H. Wickham. ggplot2: elegant graphics for data analysis. 
Springer New York, 2009.\n#' }\n#' \ngetPreciBar_comb <- function(..., list = NULL, nrow = 1, x = '', y = '', title = '', output = FALSE) {\n if (!is.null(list)) {\n data_ggplot <- do.call('rbind', list)\n } else {\n \n bars <- list(...)\n checkBind(bars, 'rbind')\n data_ggplot <- do.call('rbind', bars)\n }\n \n if (!class(data_ggplot) == 'data.frame') {\n warning('Your input is probably a list, but you forget to add \"list = \" before it.\n Try again, or check help for more information.')\n } else if (is.null(data_ggplot$Name)) {\n stop('No \"Name\" column in the input data, check the arguments in getPreciBar(), if \n output = \"ggplot\" is assigned, more info please check ?getPreciBar.')\n }\n \n data_ggplot$Name <- factor(data_ggplot$Name, levels = unique(data_ggplot$Name), ordered = TRUE)\n \n theme_set(theme_bw())\n \n mainLayer <- with(data_ggplot, {\n ggplot(data_ggplot) +\n geom_bar(aes(x = Index, y = Preci),fill = 'cyan2', stat = 'identity', \n colour = 'black', width = rel(.4)) +\n facet_wrap( ~ Name, nrow = nrow) +\n theme(plot.title = element_text(size = rel(1.6), face = 'bold'),\n axis.title.x = element_text(size = rel(1.6)),\n axis.title.y = element_text(size = rel(1.6)),\n axis.text.x = element_text(angle = 90, hjust = 1, size = rel(1.9)),\n axis.text.y = element_text(size = rel(1.9))) +\n labs(x = x, y = y, title = title)\n })\n \n if (!any(is.na(match(c('minValue', 'maxValue'), colnames(data_ggplot))))) {\n rangeLayer <- with(data_ggplot, {\n geom_errorbar(aes(x = Index, ymax = maxValue, ymin = minValue), width = rel(0.3))\n }) \n mainLayer <- mainLayer + rangeLayer\n }\n \n \n suppressWarnings(print(mainLayer))\n \n if (output == TRUE) return(data_ggplot)\n }\n\n", - "created" : 1449680759518.000, - "dirty" : false, - "encoding" : "ASCII", - "folds" : "", - "hash" : "3579155930", - "id" : "FCC66B37", - "lastKnownWriteTime" : 1449680802, - "path" : "E:/1/R/hyfo/R/getPreciBar(generic).R", - "project_path" : "R/getPreciBar(generic).R", - "properties" 
: { - "tempName" : "Untitled1" - }, - "relative_order" : 13, - "source_on_save" : false, - "type" : "r_source" -} \ No newline at end of file diff --git a/.Rproj.user/D53FD3E6/sdb/prop/947FDB3E b/.Rproj.user/D53FD3E6/sdb/prop/947FDB3E new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/.Rproj.user/D53FD3E6/sdb/prop/947FDB3E @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/.Rproj.user/D53FD3E6/sdb/prop/INDEX b/.Rproj.user/D53FD3E6/sdb/prop/INDEX index 12b747d..cf885ce 100644 --- a/.Rproj.user/D53FD3E6/sdb/prop/INDEX +++ b/.Rproj.user/D53FD3E6/sdb/prop/INDEX @@ -1,3 +1,4 @@ +C%3A%2FUsers%2FYuanchao%2FDesktop%2Fmapping.R="947FDB3E" E%3A%2F1%2FR%2Faaa.R="C5F1CC77" E%3A%2F1%2FR%2FbiasCorrect.R="65AE66E1" E%3A%2F1%2FR%2FextractPeriod.R="720670A7" diff --git a/.Rproj.user/shared/notebooks/paths b/.Rproj.user/shared/notebooks/paths new file mode 100644 index 0000000..7cc155f --- /dev/null +++ b/.Rproj.user/shared/notebooks/paths @@ -0,0 +1,5 @@ +/Users/yuanchaoxu/Documents/GitHub/hyfo/.travis.yml="3AC61E6F" +/Users/yuanchaoxu/Documents/GitHub/hyfo/NAMESPACE="11200187" +/Users/yuanchaoxu/Documents/GitHub/hyfo/NEWS="40156A70" +/Users/yuanchaoxu/Documents/GitHub/hyfo/cran-comments.md="E91D33FC" +/Users/yuanchaoxu/Documents/GitHub/hyfo/man/getSpatialMap_mat.Rd="5D3771F5" diff --git a/.gitignore b/.gitignore index f421bc6..36e034b 100644 --- a/.gitignore +++ b/.gitignore @@ -31,3 +31,4 @@ Network Trash Folder Temporary Items .apdisk inst/doc +.Rproj.user diff --git a/.travis.yml b/.travis.yml index 71a9e19..8e44625 100644 --- a/.travis.yml +++ b/.travis.yml @@ -9,7 +9,12 @@ env: global: - NOT_CRAN = true before_install: - echo "options(repos = c(CRAN='http://cran.rstudio.com'))" > ~/.Rprofile + - R -q -e 'remotes::install_github("Rdatatable/data.table") + - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew install llvm && + export PATH="/usr/local/opt/llvm/bin:$PATH" && + export LDFLAGS="-L/usr/local/opt/llvm/lib" && + export 
CFLAGS="-I/usr/local/opt/llvm/include"; fi +# echo "options(repos = c(CRAN='https://cran.rstudio.com'))" > ~/.Rprofile # - sudo apt-get autoclean # - sudo aptitude install libgdal-dev apt_packages: @@ -26,6 +31,12 @@ apt_packages: # - libhdf5-dev # - libhdf5-serial-dev # - libgdal-dev -# - libgdal1-dev -r_binary_packages: - - rgdal \ No newline at end of file + - libgdal1-dev + - libgeos-dev + - libproj0 + +#r_binary_packages: +# - sf +# - data.table +#r_packages: +# - data.table \ No newline at end of file diff --git a/CRAN-SUBMISSION b/CRAN-SUBMISSION new file mode 100644 index 0000000..6cc9f65 --- /dev/null +++ b/CRAN-SUBMISSION @@ -0,0 +1,3 @@ +Version: 1.4.5 +Date: 2023-08-12 14:09:46 UTC +SHA: 27329d85193217a33e104c30a29bb1b154d91f0c diff --git a/DESCRIPTION b/DESCRIPTION index 51a25d6..a7ee95c 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,40 +1,41 @@ Package: hyfo Type: Package Title: Hydrology and Climate Forecasting -Version: 1.3.6 -Date: 2015-12-10 +Version: 1.4.6 +Date: 2023-8-15 Authors@R: person("Yuanchao", "Xu", email = "xuyuanchao37@gmail.com", role = c("aut", "cre")) Description: Focuses on data processing and visualization in hydrology and - climate forecasting. Main function includes data extraction, data downscaling, data - resampling, gap filler of precipitation, bias correction of forecasting data, flexible - time series plot, and spatial map generation. It is a good pre-processing and - post-processing tool for hydrological and hydraulic modellers. + climate forecasting. Main function includes data extraction, data downscaling, + data resampling, gap filler of precipitation, bias correction of forecasting + data, flexible time series plot, and spatial map generation. It is a good pre- + processing and post-processing tool for hydrological and hydraulic modellers. 
License: GPL-2 -Depends: +Depends: R (>= 3.1.0), stats (>= 3.1.3), utils(>= 3.1.3), -Imports: +Imports: ggplot2 (>= 1.0.1), reshape2 (>= 1.4.1), zoo (>= 1.7-12), - rgdal (>= 0.8-16), + sf (>= 1.0-12), plyr (>= 1.8.3), moments (>= 0.14), lmom (>= 2.5), maps(>= 2.3-9), - maptools (>= 0.8-36), - rgeos (>= 0.3-8), + sp (>= 2.0-0), ncdf4 (>= 1.14.1), MASS (>= 7.3-39), - methods -Suggests: + methods, + data.table +Suggests: gridExtra, knitr, rmarkdown VignetteBuilder: knitr LazyData: true -URL: http://yuanchao-xu.github.io/hyfo/ +URL: https://yuanchao-xu.github.io/hyfo/ BugReports: https://github.com/Yuanchao-Xu/hyfo/issues Repository: CRAN +RoxygenNote: 7.2.3 diff --git a/NAMESPACE b/NAMESPACE index 41cfe53..44d3147 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -1,4 +1,4 @@ -# Generated by roxygen2 (4.1.1): do not edit by hand +# Generated by roxygen2: do not edit by hand export(applyBiasFactor) export(biasCorrect) @@ -33,14 +33,15 @@ export(writeNcdf) exportClasses(biasFactor) import(ggplot2) import(maps) -import(maptools) import(ncdf4) import(plyr) -import(rgdal) -import(rgeos) +import(sf) +import(sp) importFrom(MASS,fitdistr) +importFrom(data.table,rbindlist) importFrom(grDevices,rainbow) importFrom(lmom,samlmu) +importFrom(methods,is) importFrom(methods,new) importFrom(methods,setClass) importFrom(methods,setGeneric) @@ -48,6 +49,7 @@ importFrom(methods,setMethod) importFrom(moments,kurtosis) importFrom(moments,skewness) importFrom(reshape2,melt) +importFrom(sf,st_read) importFrom(stats,aggregate) importFrom(stats,coef) importFrom(stats,cor) diff --git a/NEWS b/NEWS index 8702cef..b05446b 100644 --- a/NEWS +++ b/NEWS @@ -1,6 +1,80 @@ +hyfo 1.4.6 +========== +Date: 2023-8-15 + +- further update for the maptools dependency under instruction + + + +hyfo 1.4.5 +========== +Date: 2023-8-11 + +- update Rgdal dependencies +- change class() to is() to avoid cran notes + + + +hyfo 1.4.3 +========== +Date: 2020-8-26 + +- update sp object with CRS issues + + + +hyfo 1.4.2 
+========== +Date: 2020-4-3 + +- change to work with new R version + + + +hyfo 1.4.1 +========== +Date: 2019-10-2 + +- changes made to loadNcdf for different types of NCDF files with different dimension names + + + +hyfo 1.4.0 +========== +Date: 2018-9-27 + +- "memberIndex" length zero bug fixed in getBiasFactor + + + +hyfo 1.3.9 +========== +Date: 2017-2-20 + +- apply data.table package to facilitate data processing + + + +hyfo 1.3.8 +========== +Date: 2017-1-8 + +- add changes to the new version of roxygen2 +- change biasCorrection's description, change default prThreshold to 0, since not every one is an expert and know how to set it, better keep the original unchanged. + + + +hyfo 1.3.7 +========== +Date: 2016-3-1 + +- add one more argument to plotTS, to cancel the marking of NA values. For some users, NA values are too many to be plotted. + + + hyfo 1.3.6 ========== -Date: 2015.12.15 +Date: 2015-12-15 - transfer from ncdf to ncdf4 - grepAndMatch created, for capturing dimension names. @@ -10,14 +84,19 @@ Date: 2015.12.15 - bug fixed for nc files without members. NOTE: ----- +==== - for hyfo$Data part, when load and write using ncdf4, there will be very little differences compared to the original, which cannot be addressed. If you first load an ncdf file, then write it, then load it again. The data part may have very little difference, less than 10E-5. +hyfo 1.4.4 +========== +Date: 2023-07-12 + +- change package from rgdal to sf due to retirement of rdgal hyfo 1.3.5 ========== -Date: 2015.12.6 +Date: 2015-12-6 - travis check passed, change the rgdal version from 0.9-3 back to 0.8-16 due to the lack of packages on travis ubuntu. - changed .yml file to fix the problem with No repository set, so cyclic dependency check skipped. @@ -28,7 +107,7 @@ Date: 2015.12.6 hyfo 1.3.3 ========== -Date: 2015.11.27 +Date: 2015-11-27 - Delete readData_folder, since it's only windows based, add information to get special version for windows users. - travis test added. 
@@ -36,7 +115,7 @@ Date: 2015.11.27 hyfo 1.3.2 ========== -Date: 2015.11.7 +Date: 2015-11-7 - bug fixed about getPreciBar, signature('data.frame') - vignettes updated about bug and hided the warning information. @@ -46,7 +125,7 @@ Date: 2015.11.7 hyfo 1.3.1 ========== -Date: 2015.11.3 +Date: 2015-11-3 - new generic function biasCorrect, extractPeriod, resample, getAnnual, getPreciBar added. No need to designate input type any more, R will detect automatically. - coordinates conversion function extracted. @@ -56,7 +135,7 @@ Date: 2015.11.3 hyfo 1.2.9 ========== -Date: 2015.10.30 +Date: 2015-10-30 - new biasFactor S4 class added, to avoid set the input type every time. - operational bias correction has been changed to generic function. diff --git a/R/analyzeTS.R b/R/analyzeTS.R index c0546f8..0d70136 100644 --- a/R/analyzeTS.R +++ b/R/analyzeTS.R @@ -9,6 +9,7 @@ #' different outputs in the later multiplot using \code{plotTS_comb}. #' @param plot representing the plot type, there are two types, "norm" and "cum", "norm" gives an normal #' plot, and "cum" gives a cumulative plot. Default is "norm". +#' @param showNA A boolean representing whether the NA values should be marked, default is TRUE. #' @param x label for x axis. #' @param y label for y axis. #' @param title plot title. @@ -40,7 +41,7 @@ #' # and compare them using plotTS_comb. If all data are in one plot, there might be too messy. 
#' #' -#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ #' #' @references #' \itemize{ @@ -49,13 +50,14 @@ #' #' @import ggplot2 #' @importFrom reshape2 melt +#' @importFrom methods is #' @export -plotTS <- function(..., type = 'line', output = 'data', plot = 'norm', name = NULL, x = NULL, - y = NULL, title = NULL, list = NULL) { +plotTS <- function(..., type = 'line', output = 'data', plot = 'norm', name = NULL, showNA = TRUE, + x = NULL, y = NULL, title = NULL, list = NULL) { ## arrange input TS or TS list. if (is.null(list)) { list <- list(...) - if (!class(list[[1]]) == 'data.frame') { + if (!is(list[[1]])[1] == 'data.frame') { warning('Your input is probably a list, but you forget to add "list = " before it. Try again, or check help for more information.') } @@ -142,12 +144,16 @@ plotTS <- function(..., type = 'line', output = 'data', plot = 'norm', name = NU stop("No such plot type.") } + if (showNA == TRUE) { + missingVLayer <- with(TS, { + geom_point(data = data_plot[NAIndex, ], group = 1, size = 3, shape = 4, color = 'black') + }) + + mainLayer <- mainLayer + missingVLayer + } - missingVLayer <- with(TS, { - geom_point(data = data_plot[NAIndex, ], group = 1, size = 3, shape = 4, color = 'black') - }) - plotLayer <- mainLayer + secondLayer + missingVLayer + plotLayer <- mainLayer + secondLayer print(plotLayer) @@ -191,7 +197,7 @@ plotTS <- function(..., type = 'line', output = 'data', plot = 'norm', name = NU #' plotTS_comb(a1, a2) #' plotTS_comb(list = list(a1, a2), y = 'y axis', nrow = 2) #' -#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ #' #' @references #' \itemize{ @@ -199,6 +205,7 @@ plotTS <- function(..., type = 'line', output = 'data', plot = 'norm', name = NU #' } #' @export #' @import 
ggplot2 +#' @importFrom data.table rbindlist plotTS_comb <- function(..., nrow = 1, type = 'line', list = NULL, x = 'Date', y = '', title = '', output = FALSE){ # In ploting the time series, since the data comes from outside of hyfo, @@ -208,15 +215,17 @@ plotTS_comb <- function(..., nrow = 1, type = 'line', list = NULL, x = 'Date', y if (!is.null(list)) { checkBind(list, 'rbind') - data_ggplot <- do.call('rbind', list) + #data_ggplot <- do.call('rbind', list) + data_ggplot <- rbindlist(list) } else { bars <- list(...) checkBind(bars, 'rbind') - data_ggplot <- do.call('rbind', bars) + #data_ggplot <- do.call('rbind', bars) + data_ggplot <- rbindlist(bars) } - if (!class(data_ggplot) == 'data.frame') { + if (!class(data_ggplot)[1] == 'data.table') { warning('Your input is probably a list, but you forget to add "list = " before it. Try again, or check help for more information.') } else if (is.null(data_ggplot$name)) { @@ -270,14 +279,14 @@ plotTS_comb <- function(..., nrow = 1, type = 'line', list = NULL, x = 'Date', y #' dis <- seq(1, 100) #' getLMom(dis) #' -#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ #' #' @export #' @references #' #' \itemize{ #' \item J. R. M. Hosking (2015). L-moments. R package, version 2.5. URL: -#' http://CRAN.R-project.org/package=lmom. +#' https://CRAN.R-project.org/package=lmom. #' } #' #' @@ -304,17 +313,17 @@ getLMom <- function(dis){ #' dis <- seq(1, 100) #' getMoment(dis) #' -#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ #' #' @export #' @references #' #' \itemize{ #' \item Lukasz Komsta and Frederick Novomestky (2015). moments: Moments, cumulants, skewness, kurtosis and -#' related tests. R package version 0.14. 
http://CRAN.R-project.org/package=moments +#' related tests. R package version 0.14. https://CRAN.R-project.org/package=moments #' #' \item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for -#' Statistical Computing, Vienna, Austria. URL http://www.R-project.org/. +#' Statistical Computing, Vienna, Austria. URL https://www.R-project.org/. #' } #' #' @importFrom moments skewness kurtosis diff --git a/R/biasCorrect(generic).R b/R/biasCorrect(generic).R index b087d5c..576051b 100644 --- a/R/biasCorrect(generic).R +++ b/R/biasCorrect(generic).R @@ -24,7 +24,9 @@ #' @param preci If the precipitation is biascorrected, then you have to assign \code{preci = TRUE}. Since for #' precipitation, some biascorrect methods may not apply to, or some methods are specially for precipitation. #' Default is FALSE, refer to details. -#' @param prThreshold The minimum value that is considered as a non-zero precipitation. Default to 1 (assuming mm). +#' @param prThreshold The minimum value that is considered as a non-zero precipitation. Default to 0 (assuming mm). If you want +#' to use precipitation biascorrect, you should consider carefully how to set this threshold, usually is 1. But you +#' can try with different numbers to see the results. #' @param extrapolate When use 'eqm' method, and 'no' is set, modified frc is bounded by the range of obs. #' If 'constant' is set, modified frc is not bounded by the range of obs. Default is 'no'. #' @details @@ -75,7 +77,7 @@ #' (e.g. precipitation, wind speed, etc.) because values out of the variable range could be obtained #' (e.g. negative wind speeds...) #' -#' \strong{scaling} +#' \strong{scaling} #' #' This method consists on scaling the simulation with the difference (additive) or quotient (multiplicative) #' between the observed and simulated means in the train period. 
The \code{additive} or \code{multiplicative} @@ -183,7 +185,7 @@ #' #' #' -#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ #' #' #' @references @@ -212,14 +214,18 @@ setGeneric('biasCorrect', function(frc, hindcast, obs, method = 'scaling', scale standardGeneric('biasCorrect') }) -#' @describeIn biasCorrect + +# Since in new version of roxygen2, describeIn was changed, https://stackoverflow.com/questions/24246594/automatically-document-all-methods-of-an-s4-generic-using-roxygen2 +# so use rdname instead +#' @rdname biasCorrect +#' setMethod('biasCorrect', signature('data.frame', 'data.frame', 'data.frame'), function(frc, hindcast, obs, method, scaleType, preci, prThreshold, extrapolate) { result <- biasCorrect.TS(frc, hindcast, obs, method, scaleType, preci, prThreshold, extrapolate) return(result) }) -#' @describeIn biasCorrect +#' @rdname biasCorrect setMethod('biasCorrect', signature('list', 'list', 'list'), function(frc, hindcast, obs, method, scaleType, preci, prThreshold, extrapolate) { result <- biasCorrect.list(frc, hindcast, obs, method, scaleType, preci, prThreshold, extrapolate) @@ -280,6 +286,9 @@ biasCorrect.list <- function(frc, hindcast, obs, method, scaleType, preci, prThr # Now real bias correction is executed. 
+ # for some forecasts, they have results from different models or scenarios, if so + # there will be a dimension called member + memberIndex <- grepAndMatch('member', attributes(frcData)$dimensions) # For dataset that has a member part diff --git a/R/case_anarbe.R b/R/case_anarbe.R index 48abdce..cc347c9 100644 --- a/R/case_anarbe.R +++ b/R/case_anarbe.R @@ -12,26 +12,28 @@ #' folder <- strsplit(file, '1999')[[1]][1] #' a <- collectData_csv_anarbe(folder) #' -#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ #' #' @references #' #' \itemize{ #' \item http://meteo.navarra.es/estaciones/mapadeestaciones.cfm #' \item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for -#' Statistical Computing, Vienna, Austria. URL http://www.R-project.org/. +#' Statistical Computing, Vienna, Austria. URL https://www.R-project.org/. #' } #' #' @source http://meteo.navarra.es/estaciones/mapadeestaciones.cfm #' @export #' @importFrom utils tail +#' @importFrom data.table rbindlist collectData_csv_anarbe <- function(folderName, output = TRUE){ fileNames <- list.files(folderName, pattern='*.csv', full.names = TRUE) data <- lapply(fileNames, readColumn_csv_anarbe) - data <- do.call('rbind', data) + data <- rbindlist(data) data <- data[, 1:2] - data[, 1] <- as.Date(data[, 1], format = '%d/%m/%Y') + # due to the special structure of data.table, this should be data[[1]], instead of data[, 1] + data[, 1] <- as.Date(data[[1]], format = '%d/%m/%Y') #newFileName <- file.choose(new = T) #write.table(data_new,file=newFileName,row.names = F, col.names = F,sep=',') @@ -258,14 +260,14 @@ collectData_excel_anarbe <- function(folderName, keyword = NULL, output = TRUE){ #' } #' #' -#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +#' # More examples can be found in the user manual on 
https://yuanchao-xu.github.io/hyfo/ #' #' @references #' #' \itemize{ #' \item http://www4.gipuzkoa.net/oohh/web/esp/02.asp #' \item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for -#' Statistical Computing, Vienna, Austria. URL http://www.R-project.org/. +#' Statistical Computing, Vienna, Austria. URL https://www.R-project.org/. #' } #' #' @@ -273,6 +275,7 @@ collectData_excel_anarbe <- function(folderName, keyword = NULL, output = TRUE){ #' @return The collected data from different txt files. #' @export #' @importFrom utils tail +#' @importFrom data.table rbindlist collectData_txt_anarbe <- function(folderName, output = TRUE, rangeWord = c('Ene ', -1, 'Total ', -6)){ #All the code should be ASCII encode, so there should be no strange symbol. @@ -294,7 +297,7 @@ collectData_txt_anarbe <- function(folderName, output = TRUE, rangeWord = c('Ene data <- lapply(fileNames, FUN = readColumn_txt_anarbe, rangeWord = rangeWord) - data <- do.call('rbind', data) + data <- rbindlist(data) a <- unlist(strsplit(folderName, '\\\\|/')) tarName <- tail(a, 2)[1] @@ -350,7 +353,7 @@ anarbe_txt <- function(dataset, x1, x2){ #' #' \itemize{ #' \item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for -#' Statistical Computing, Vienna, Austria. URL http://www.R-project.org/. +#' Statistical Computing, Vienna, Austria. URL https://www.R-project.org/. 
#' } #' #' @importFrom utils read.fwf diff --git a/R/classes.R b/R/classes.R index bbcbe44..dccb495 100644 --- a/R/classes.R +++ b/R/classes.R @@ -111,7 +111,7 @@ setClass("biasFactor.hyfo", representation(lonLatDim = 'integer'), contains = 'b # #' @exportClass # setClass("hyfo", representation(varName = "character", xyCoords = 'list', Dates = 'list', # Data = 'array', Loaded = 'character'), -# prototype(Loaded = 'by hyfo package, http://yuanchao-xu.github.io/hyfo/'), +# prototype(Loaded = 'by hyfo package, https://yuanchao-xu.github.io/hyfo/'), # validity = checkHyfo) # # diff --git a/R/collectData.R b/R/collectData.R index c967b4b..229c43c 100644 --- a/R/collectData.R +++ b/R/collectData.R @@ -16,9 +16,10 @@ #' #' a <- collectData(folder, fileType = 'csv', range = c(10, 20, 1,2)) #' -#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ #' #' @export +#' @importFrom data.table rbindlist collectData <- function(folderName, fileType = NULL, range = NULL, sheetIndex = 1){ message('All the files in the folder should have the same format') @@ -36,7 +37,7 @@ collectData <- function(folderName, fileType = NULL, range = NULL, sheetIndex = if (length(fileNames) == 0) stop('No csv file in the folder.') data <- lapply(fileNames, readCsv, range = range) - data <- do.call('rbind', data) + data <- rbindlist(data) } else if (fileType == 'txt') { fileNames <- list.files(folderName, pattern = '*.txt', full.names = TRUE) @@ -95,7 +96,7 @@ readTxt <- function(fileName, range){ #' @importFrom utils read.csv #' @references #' R Core Team (2015). R: A language and environment for statistical computing. R Foundation for -#' Statistical Computing, Vienna, Austria. URL http://www.R-project.org/. +#' Statistical Computing, Vienna, Austria. URL https://www.R-project.org/. 
readCsv <- function(fileName, range){ data <- read.csv(fileName, skip = range[1] - 1, header = FALSE) diff --git a/R/dataDocument.R b/R/dataDocument.R index 28ba822..0899028 100644 --- a/R/dataDocument.R +++ b/R/dataDocument.R @@ -53,7 +53,7 @@ #' testCat -#' @format A catchment file generated by library rgdal. +#' @format A catchment file generated by library sf. #' \describe{ #' \item{class}{Formal class 'SpatialPolygonsDataFrame' [package "sp"] with 5 slots} #' ... diff --git a/R/extractPeriod(generic).R b/R/extractPeriod(generic).R index 7976c73..e1ced40 100644 --- a/R/extractPeriod(generic).R +++ b/R/extractPeriod(generic).R @@ -76,14 +76,14 @@ #' dataframe_new <- extractPeriod(dataframe, month = c(12,1,2), year = 1995) #' #' -#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ #' #' @importFrom zoo as.Date #' @references #' #' \itemize{ #' \item Achim Zeileis and Gabor Grothendieck (2005). zoo: S3 Infrastructure for Regular and Irregular Time -#' Series. Journal of Statistical Software, 14(6), 1-27. URL http://www.jstatsoft.org/v14/i06/ +#' Series. Journal of Statistical Software, 14(6), 1-27. 
URL https://www.jstatsoft.org/v14/i06/ #' } #' #' @export @@ -93,7 +93,7 @@ setGeneric('extractPeriod', function(data, startDate = NULL, endDate = NULL, com }) -#' @describeIn extractPeriod +#' @rdname extractPeriod #' @importFrom methods setMethod setMethod('extractPeriod', signature('data.frame'), function(data, startDate, endDate, commonPeriod, year, month) { @@ -105,7 +105,7 @@ setMethod('extractPeriod', signature('data.frame'), }) -#' @describeIn extractPeriod +#' @rdname extractPeriod #' @importFrom methods setMethod setMethod('extractPeriod', signature('list'), function(data, startDate, endDate, commonPeriod, year, month) { @@ -115,7 +115,8 @@ setMethod('extractPeriod', signature('list'), month = month) } else if (is.null(startDate) & is.null(endDate) & commonPeriod == TRUE) { - Dates <- lapply(datalist, extractPeriod_getDate) + Dates <- lapply(datalist, extractPeriod_getDate) + # Unclear why rbindlist does not work here; fall back to do.call Dates <- do.call('rbind', Dates) startDate <- as.Date(max(Dates[, 1])) @@ -223,7 +224,7 @@ extractPeriod_dataframe <- function(dataframe, startDate, endDate, year = NULL, #' #' \itemize{ #' \item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for -#' Statistical Computing, Vienna, Austria. URL http://www.R-project.org/. +#' Statistical Computing, Vienna, Austria. URL https://www.R-project.org/. #' } #' #' diff --git a/R/fillGap.R b/R/fillGap.R index 5ad64a8..506df44 100644 --- a/R/fillGap.R +++ b/R/fillGap.R @@ -47,7 +47,7 @@ #' a3 <- fillGap(a1, corPeriod = 'monthly') #' #' -#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ #' #' @references #' Gap fiiling method based on correlation and linear regression. @@ -135,7 +135,7 @@ fillGap_column <- function(i, data, corOrder, lmCoef) { #' #' \itemize{ #' \item R Core Team (2015). 
R: A language and environment for statistical computing. R Foundation for -#' Statistical Computing, Vienna, Austria. URL http://www.R-project.org/. +#' Statistical Computing, Vienna, Austria. URL https://www.R-project.org/. #' } #' #' @@ -174,7 +174,7 @@ fillGap_cor <- function(data, corPeriod = 'daily', Date) { #' @importFrom stats coef lm #' @references #' R Core Team (2015). R: A language and environment for statistical computing. R Foundation for -#' Statistical Computing, Vienna, Austria. URL http://www.R-project.org/. +#' Statistical Computing, Vienna, Austria. URL https://www.R-project.org/. #' fillGap_lmCoef <- function(data, corOrder) { l <- dim(data)[2] diff --git a/R/getAnnual(generic).R b/R/getAnnual(generic).R index b00cbd1..c6c96b3 100644 --- a/R/getAnnual(generic).R +++ b/R/getAnnual(generic).R @@ -33,7 +33,7 @@ #' getAnnual(a3) #' #' -#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ #' #' @export #' @importFrom methods setGeneric @@ -44,7 +44,7 @@ #' \item Hadley Wickham (2007). Reshaping Data with the reshape Package. Journal of Statistical Software, #' 21(12), 1-20. URL http://www.jstatsoft.org/v21/i12/. #' \item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for -#' Statistical Computing, Vienna, Austria. URL http://www.R-project.org/. +#' Statistical Computing, Vienna, Austria. URL https://www.R-project.org/. #' } #' #' @@ -53,7 +53,7 @@ setGeneric('getAnnual', function(data, output = 'series', minRecords = 355, standardGeneric('getAnnual') }) -#' @describeIn getAnnual +#' @rdname getAnnual #' @importFrom methods setMethod setMethod('getAnnual', signature('data.frame'), function(data, output, minRecords, ...) 
{ @@ -62,7 +62,7 @@ setMethod('getAnnual', signature('data.frame'), return(result) }) -#' @describeIn getAnnual +#' @rdname getAnnual #' @importFrom methods setMethod setMethod('getAnnual', signature('list'), function(data, output, minRecords, ...) { @@ -71,6 +71,7 @@ setMethod('getAnnual', signature('list'), return(result) }) +#' @importFrom data.table rbindlist getAnnual.TS <- function(dataframe) { Date <- as.POSIXlt(dataframe[, 1]) # Calculate how many gauging stations. @@ -82,7 +83,7 @@ getAnnual.TS <- function(dataframe) { getAnnual_dataframe(dataframe_new) }) - data <- do.call('rbind', data) + data <- rbindlist(data) # After rbind, factor level has to be reassigned in order to be well plotted. data$Year <- factor(data$Year, levels = sort(unique(data$Year)), ordered = TRUE) rownames(data) <- NULL @@ -91,9 +92,10 @@ getAnnual.TS <- function(dataframe) { } +#' @importFrom data.table rbindlist getAnnual.list <- function(datalist) { data <- lapply(datalist, FUN = getAnnual_dataframe) - data <- do.call('rbind', data) + data <- rbindlist(data) # After rbind, factor level has to be reassigned in order to be well plotted. 
data$Year <- factor(data$Year, levels = sort(unique(data$Year)), ordered = TRUE) rownames(data) <- NULL diff --git a/R/getEnsemble.R b/R/getEnsemble.R index a467a3a..83f7320 100644 --- a/R/getEnsemble.R +++ b/R/getEnsemble.R @@ -98,7 +98,7 @@ #' b <- getHisEnsem(a, example = c('1994-2-4', '1996-1-4'), interval = 210, buffer = 30) #' #' -#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ #' #' #' @importFrom reshape2 melt @@ -330,7 +330,7 @@ getHisEnsem <- function (TS, example, interval = 365, buffer = 0, plot = 'norm', #' b <- getFrcEnsem(nc, coord = c(-1.4, 43.2)) #' #' -#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ #' #' @return A ensemble time series extracted from forecating data. #' @@ -481,11 +481,12 @@ getFrcEnsem <- function(dataset, cell = 'mean', plot = 'norm', output = 'data', #' getEnsem_comb(b1, b2) #' getEnsem_comb(list = list(b1, b2), nrow = 2) #' -#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ #' #' #' @export #' @import ggplot2 +#' @importFrom data.table rbindlist #' @references #' #' \itemize{ @@ -503,15 +504,15 @@ getEnsem_comb <- function(..., list = NULL, nrow = 1, legend = TRUE, x = '', y = if (!is.null(list)) { checkBind(list, 'rbind') - data_ggplot <- do.call('rbind', list) + data_ggplot <- rbindlist(list) } else { plots <- list(...) 
checkBind(plots, 'rbind') - data_ggplot <- do.call('rbind', plots) + data_ggplot <- rbindlist(plots) } #data_ggplot$name <- factor(data_ggplot$name, levels = data_ggplot$name, ordered = TRUE) - if (!class(data_ggplot) == 'data.frame') { + if (!class(data_ggplot)[1] == 'data.table') { warning('Your input is probably a list, but you forget to add "list = " before it. Try again, or check help for more information.') } else if (is.null(data_ggplot$name)) { diff --git a/R/getPreciBar(generic).R b/R/getPreciBar(generic).R index 3b2d994..e1e07f2 100644 --- a/R/getPreciBar(generic).R +++ b/R/getPreciBar(generic).R @@ -55,7 +55,7 @@ #' a <- getPreciBar(TS, method = 'spring', info = TRUE) #' #' -#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ #' #' @references #' @@ -65,7 +65,7 @@ #' 21(12), 1-20. URL http://www.jstatsoft.org/v21/i12/. #' \item H. Wickham. ggplot2: elegant graphics for data analysis. Springer New York, 2009. #' \item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for -#' Statistical Computing, Vienna, Austria. URL http://www.R-project.org/. +#' Statistical Computing, Vienna, Austria. URL https://www.R-project.org/. #' } #' #' @@ -77,7 +77,7 @@ setGeneric('getPreciBar', function(data, method, cell = 'mean', output = 'data', standardGeneric('getPreciBar') }) -#' @describeIn getPreciBar +#' @rdname getPreciBar setMethod('getPreciBar', signature('list'), function(data, method, cell, output, name, plotRange, member, omitNA, info, ...) { TS <- getPreciBar.list(data, cell, member) @@ -91,7 +91,7 @@ setMethod('getPreciBar', signature('list'), return(result) }) -#' @describeIn getPreciBar +#' @rdname getPreciBar setMethod('getPreciBar', signature('data.frame'), function(data, method, cell, output, name, plotRange, member, omitNA, info, ...) 
{ Date <- as.POSIXlt(TS[, 1]) @@ -403,10 +403,11 @@ getPreciBar.plot <- function(TS, method, output, name, plotRange, omitNA, info, #' #' getPreciBar_comb(b1, b2) #' -#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ #' #' @export #' @import ggplot2 +#' @importFrom data.table rbindlist #' @references #' #' \itemize{ @@ -415,15 +416,15 @@ getPreciBar.plot <- function(TS, method, output, name, plotRange, omitNA, info, #' getPreciBar_comb <- function(..., list = NULL, nrow = 1, x = '', y = '', title = '', output = FALSE) { if (!is.null(list)) { - data_ggplot <- do.call('rbind', list) + data_ggplot <- rbindlist(list) } else { bars <- list(...) checkBind(bars, 'rbind') - data_ggplot <- do.call('rbind', bars) + data_ggplot <- rbindlist(bars) } - if (!class(data_ggplot) == 'data.frame') { + if (!class(data_ggplot)[1] == 'data.table') { warning('Your input is probably a list, but you forget to add "list = " before it. Try again, or check help for more information.') } else if (is.null(data_ggplot$Name)) { diff --git a/R/getSpatialMap.R b/R/getSpatialMap.R index 399167a..c0abcf3 100644 --- a/R/getSpatialMap.R +++ b/R/getSpatialMap.R @@ -220,17 +220,18 @@ getSpatialMap <- function(dataset, method = NULL, member = 'mean', ...) { #' } #' #' -#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ #' #' @export -#' @import ggplot2 plyr maps maptools rgeos +#' @import ggplot2 plyr maps sp sf #' @importFrom stats median +#' @importFrom methods is #' @importFrom reshape2 melt #' @references #' #' \itemize{ #' \item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for -#' Statistical Computing, Vienna, Austria. URL http://www.R-project.org/. +#' Statistical Computing, Vienna, Austria. 
URL https://www.R-project.org/. #' #' \item Hadley Wickham (2007). Reshaping Data with the reshape Package. Journal of Statistical Software, #' 21(12), 1-20. URL http://www.jstatsoft.org/v21/i12/. @@ -240,13 +241,12 @@ getSpatialMap <- function(dataset, method = NULL, member = 'mean', ...) { #' #' \item Original S code by Richard A. Becker and Allan R. Wilks. R version by Ray Brownrigg. Enhancements #' by Thomas P Minka (2015). maps: Draw Geographical Maps. R package version -#' 2.3-11. http://CRAN.R-project.org/package=maps +#' 2.3-11. https://CRAN.R-project.org/package=maps #' -#' \item Roger Bivand and Nicholas Lewin-Koh (2015). maptools: Tools for Reading and Handling Spatial -#' Objects. R package version 0.8-36. http://CRAN.R-project.org/package=maptools +#' \item Pebesma, Edzer, and Roger Bivand. 2023a. Sp: Classes and Methods for Spatial Data. https://CRAN.R-project.org/package=sp. #' #' \item Roger Bivand and Colin Rundel (2015). rgeos: Interface to Geometry Engine - Open Source (GEOS). R -#' package version 0.3-11. http://CRAN.R-project.org/package=rgeos +#' package version 0.3-11. https://CRAN.R-project.org/package=sf #' #' } #' @@ -260,7 +260,7 @@ getSpatialMap_mat <- function(matrix, title_d = NULL, catchment = NULL, point = checkWord <- c('lon', 'lat', 'z', 'value') if (is.null(attributes(matrix)$dimnames)) { stop('Input matrix is incorrect, check help to know how to get the matrix.') - } else if (!is.null(catchment) & class(catchment) != "SpatialPolygonsDataFrame") { + } else if (!is.null(catchment) & is(catchment)[1] != "SpatialPolygonsDataFrame") { stop('Catchment format is incorrect, check help to get more details. 
') } else if (!is.null(point) & any(is.na(match(checkWord, attributes(point)$names)))) { stop('point should be a dataframe with colnames "lon, lat, z, value".') @@ -405,7 +405,7 @@ getSpatialMap_mat <- function(matrix, title_d = NULL, catchment = NULL, point = #' #' #' -#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ #' #' @details #' For \code{getSpatialMap_comb}, the maps to be compared should be with same size and resolution, @@ -415,6 +415,8 @@ getSpatialMap_mat <- function(matrix, title_d = NULL, catchment = NULL, point = #' #' @export #' @import ggplot2 maps +#' @importFrom data.table rbindlist +#' @importFrom methods is #' @references #' #' \itemize{ @@ -425,14 +427,14 @@ getSpatialMap_comb <- function(..., list = NULL, nrow = 1, x = '', y = '', title if (!is.null(list)) { - data_ggplot <- do.call('rbind', list) + data_ggplot <- rbindlist(list) } else { maps <- list(...) checkBind(maps, 'rbind') - data_ggplot <- do.call('rbind', maps) + data_ggplot <- rbindlist(maps) } - if (!class(data_ggplot) == 'data.frame') { + if (!is(data_ggplot)[1] == 'data.frame') { warning('Your input is probably a list, but you forget to add "list = " before it. 
Try again, or check help for more information.') } else if (is.null(data_ggplot$Name)) { diff --git a/R/list2dataframe.R b/R/list2dataframe.R index bc54620..175db02 100644 --- a/R/list2dataframe.R +++ b/R/list2dataframe.R @@ -16,7 +16,7 @@ #' #' dataframe <- list2Dataframe(datalist_new) #' -#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ #' #' @export list2Dataframe <- function(datalist) { diff --git a/R/multi-biasCorrect(generic).R b/R/multi-biasCorrect(generic).R index c846307..d740f94 100644 --- a/R/multi-biasCorrect(generic).R +++ b/R/multi-biasCorrect(generic).R @@ -147,7 +147,7 @@ #' #' #' -#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ #' #' #' @references @@ -179,7 +179,7 @@ setGeneric('getBiasFactor', function(hindcast, obs, method = 'scaling', scaleTyp standardGeneric('getBiasFactor') }) -#' @describeIn getBiasFactor +#' @rdname getBiasFactor setMethod('getBiasFactor', signature('data.frame', 'data.frame'), function(hindcast, obs, method, scaleType, preci, prThreshold, extrapolate) { result <- getBiasFactor.TS(hindcast, obs, method, scaleType, preci, prThreshold, extrapolate) @@ -188,7 +188,7 @@ setMethod('getBiasFactor', signature('data.frame', 'data.frame'), # This is for the grid file from downscaleR -#' @describeIn getBiasFactor +#' @rdname getBiasFactor #' @importFrom methods new setMethod('getBiasFactor', signature('list', 'list'), function(hindcast, obs, method, scaleType, preci, prThreshold, extrapolate) { @@ -335,7 +335,7 @@ setMethod('getBiasFactor', signature('list', 'list'), #' #' #' -#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ #' #' #' @references 
@@ -362,7 +362,7 @@ setGeneric('applyBiasFactor', function(frc, biasFactor, obs = NULL) { standardGeneric('applyBiasFactor') }) -#' @describeIn applyBiasFactor +#' @rdname applyBiasFactor #' @importFrom methods setMethod setMethod('applyBiasFactor', signature('data.frame', 'biasFactor'), function(frc, biasFactor, obs) { @@ -370,7 +370,7 @@ setMethod('applyBiasFactor', signature('data.frame', 'biasFactor'), return(result) }) -#' @describeIn applyBiasFactor +#' @rdname applyBiasFactor #' @importFrom methods setMethod setMethod('applyBiasFactor', signature('list', 'biasFactor.hyfo'), function(frc, biasFactor, obs) { @@ -437,11 +437,14 @@ getBiasFactor.list <- function(hindcast, obs, method, scaleType, preci, prThresh # Now real bias correction is executed. + # for some forcasts, they have results from different models or scenarios, if so + # there will be a dimension called member + memberIndex <- grepAndMatch('member', attributes(hindcastData)$dimensions) # For dataset that has a member part - if (!is.na(memberIndex)) { + if (length(memberIndex) != 0) { hindcastData <- adjustDim(hindcastData, ref = c('lon', 'lat', 'time', 'member')) @@ -573,7 +576,7 @@ applyBiasFactor.list <- function(frc, biasFactor, obs) { memberIndex <- grepAndMatch('member', attributes(frcData)$dimensions) # For dataset that has a member part - if (!is.na(memberIndex)) { + if (length(memberIndex) != 0) { # check if frcData and hindcastData has the same dimension and length. 
if (calcuDim(frcData, dim = 'member') != memberDim) { stop('frc data has different member number from hindcast.') diff --git a/R/ncdf.R b/R/ncdf.R index aad5254..1180e52 100644 --- a/R/ncdf.R +++ b/R/ncdf.R @@ -12,7 +12,7 @@ #' # Then if you don't know the variable name, you can use \code{getNcdfVar} to get variable name #' varname <- getNcdfVar(filePath) #' -#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ #' #' @import ncdf4 #' @references @@ -20,7 +20,7 @@ #' \itemize{ #' \item David Pierce (2015). ncdf4: Interface to Unidata netCDF (Version 4 or #' Earlier) Format Data Files. R package version 1.14.1. -#' http://CRAN.R-project.org/package=ncdf4 +#' https://CRAN.R-project.org/package=ncdf4 #' } #' #' @@ -72,7 +72,7 @@ getNcdfVar <- function(filePath) { #' \itemize{ #' \item David Pierce (2015). ncdf4: Interface to Unidata netCDF (Version 4 or #' Earlier) Format Data Files. R package version 1.14.1. -#' http://CRAN.R-project.org/package=ncdf4 +#' https://CRAN.R-project.org/package=ncdf4 #' #' \item Santander MetGroup (2015). ecomsUDG.Raccess: R interface to the ECOMS User Data Gateway. R package #' version 2.2-6. http://meteo.unican.es/ecoms-udg @@ -94,6 +94,7 @@ loadNcdf <- function(filePath, varname, tz = 'GMT', ...) { # Only deals with the most common dimensions, futher dimensions will be added in future. dimIndex <- grepAndMatch(c('lon', 'lat', 'time', 'member'), dimNames) + if (length(dimIndex) < 3) dimIndex <- grepAndMatch(c('x', 'y', 'time', 'member'), dimNames) if (length(dimIndex) < 3) stop('Your file has less than 3 dimensions.') # First needs to identify the variable name, load the right data @@ -148,7 +149,7 @@ loadNcdf <- function(filePath, varname, tz = 'GMT', ...) 
{ if (!is.na(dimIndex[4])) gridData$Members <- var$dim[[dimIndex[4]]]$vals - gridData$Loaded <- 'by hyfo package, http://yuanchao-xu.github.io/hyfo/' + gridData$Loaded <- 'by hyfo package, https://yuanchao-xu.github.io/hyfo/' nc_close(nc) output <- downscaleNcdf(gridData, ...) @@ -184,7 +185,7 @@ loadNcdf <- function(filePath, varname, tz = 'GMT', ...) { #' nc1 <- downscaleNcdf(nc, year = 2006, lon = c(-2, -0.5), lat = c(43.2, 43.7)) #' nc2 <- downscaleNcdf(nc, year = 2005, month = 3:8, lon = c(-2, -0.5), lat = c(43.2, 43.7)) #' -#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ #' #' @export #' @references @@ -324,6 +325,7 @@ downscaleNcdf <- function(gridData, year = NULL, month = NULL, lon = NULL, lat = #' seconds or days and so on. If not specified, the function will pick up the possible largest #' time units from \code{c('weeks', 'days', 'hours', 'mins', 'secs')} #' @param version ncdf file versions, default is 3, if 4 is chosen, output file will be foreced to version 4. +#' @param tz time zone, default is "GMT" #' @return An NetCDF version 3 file. #' @examples #' # First open the test NETcDF file. @@ -337,9 +339,12 @@ downscaleNcdf <- function(gridData, year = NULL, month = NULL, lon = NULL, lat = #' #' # Then write to your work directory #' +#' \dontrun{ #' writeNcdf(nc, 'test.nc') +#' } #' -#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +#' +#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ #' #' @export #' @import ncdf4 @@ -348,7 +353,7 @@ downscaleNcdf <- function(gridData, year = NULL, month = NULL, lon = NULL, lat = #' \itemize{ #' \item David Pierce (2015). ncdf4: Interface to Unidata netCDF (Version 4 or #' Earlier) Format Data Files. R package version 1.14.1. 
-#' http://CRAN.R-project.org/package=ncdf4 +#' https://CRAN.R-project.org/package=ncdf4 #' #' \item Santander MetGroup (2015). ecomsUDG.Raccess: R interface to the ECOMS User Data Gateway. R package #' version 2.2-6. http://meteo.unican.es/ecoms-udg @@ -432,7 +437,7 @@ writeNcdf <- function(gridData, filePath, missingValue = 1e20, tz = 'GMT', units # This part has to be put ncatt_put(nc, 0, "Conventions","CF-1.4") - ncatt_put(nc, 0, 'WrittenBy', 'hyfo(http://yuanchao-xu.github.io/hyfo/)') + ncatt_put(nc, 0, 'WrittenBy', 'hyfo(https://yuanchao-xu.github.io/hyfo/)') #data <- aperm(gridData$Data, dimIndex) no need to do this, in the process above # when you define the dimlist, the order of the dimension was fixed. @@ -448,7 +453,9 @@ getTimeUnit <- function(dates) { output <- NULL for (unit in units) { time <- difftime(dates, dates[1], units = unit) - rem <- sapply(time, function(x) x%%1) + # previously it worked like below, then new version came + # rem <- sapply(time, function(x) x%%1) + rem <- as.numeric(time) %% 1 if (!any(rem != 0)) { output <- unit break @@ -462,7 +469,7 @@ getTimeUnit <- function(dates) { #' @import ncdf4 #' @references #' David Pierce (2014). ncdf: Interface to Unidata netCDF data files. R package version 1.6.8. -#' http://CRAN.R-project.org/package=ncdf +#' https://CRAN.R-project.org/package=ncdf getExtralDim <- function(...) { dimList <- list(...) diff --git a/R/readfolders.R b/R/readfolders.R index ecee624..940a82f 100644 --- a/R/readfolders.R +++ b/R/readfolders.R @@ -24,7 +24,7 @@ readData_folder <- function(folderName, keyword) { #' #' \itemize{ #' \item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for -#' Statistical Computing, Vienna, Austria. URL http://www.R-project.org/. +#' Statistical Computing, Vienna, Austria. URL https://www.R-project.org/. 
#' } #' diff --git a/R/resample(generic).R b/R/resample(generic).R index d54ba69..ad974f5 100644 --- a/R/resample(generic).R +++ b/R/resample(generic).R @@ -38,7 +38,7 @@ #' nc_new <- resample(nc, 'day2mon') #' #' -#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ #' #' @export #' @importFrom stats aggregate @@ -46,7 +46,7 @@ #' #' \itemize{ #' \item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for -#' Statistical Computing, Vienna, Austria. URL http://www.R-project.org/. +#' Statistical Computing, Vienna, Austria. URL https://www.R-project.org/. #' } #' setGeneric('resample', function(data, method) { @@ -54,14 +54,14 @@ setGeneric('resample', function(data, method) { }) -#' @describeIn resample +#' @rdname resample setMethod('resample', signature('data.frame'), function(data, method) { result <- resample.TS(data, method) return(result) }) -#' @describeIn resample +#' @rdname resample setMethod('resample', signature('list'), function(data, method) { result <- resample.list(data, method) @@ -71,6 +71,7 @@ setMethod('resample', signature('list'), #' @importFrom stats aggregate +#' @importFrom data.table rbindlist resample.TS <- function(TS, method) { if (length(TS) != 2) { stop('Time series not correct, should be two columns, Date and value.') @@ -84,7 +85,7 @@ resample.TS <- function(TS, method) { data <- apply(TS, MARGIN = 1 , FUN = mon2day) - output <- do.call('rbind', data) + output <- rbindlist(data) } else if (method == 'day2mon') { Date <- as.Date(TS[, 1]) year <- format(Date, format = '%Y') @@ -133,7 +134,7 @@ resample.list <- function(hyfo, method) { #' #' \itemize{ #' \item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for -#' Statistical Computing, Vienna, Austria. URL http://www.R-project.org/. +#' Statistical Computing, Vienna, Austria. 
URL https://www.R-project.org/. #' } #' mon2day <- function(monData) { diff --git a/R/shp2cat.R b/R/shp2cat.R index d84977c..b86372a 100644 --- a/R/shp2cat.R +++ b/R/shp2cat.R @@ -2,25 +2,25 @@ #' @param filePath A string representing the path of the shape file. #' @return A catchment object can be used in \code{getSpatialMap()}. #' @export -#' @details This function is based on the package \code{rgdal} and \code{sp}, and the output comes from the package +#' @details This function is based on the package \code{sf} and \code{sp}, and the output comes from the package #' \code{sp} #' @examples #' #open internal file #' file <- system.file("extdata", "testCat.shp", package = "hyfo") #' catchment <- shp2cat(file) #' -#' # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ #' -#' @import rgdal +#' @importFrom sf st_read #' @importFrom utils tail #' @references #' #' \itemize{ #' \item Roger Bivand, Tim Keitt and Barry Rowlingson (2015). rgdal: Bindings for the Geospatial Data -#' Abstraction Library. R package version 1.0-4. http://CRAN.R-project.org/package=rgdal +#' Abstraction Library. R package version 1.0-4. https://CRAN.R-project.org/package=rgdal #' #' \item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for -#' Statistical Computing, Vienna, Austria. URL http://www.R-project.org/. +#' Statistical Computing, Vienna, Austria. URL https://www.R-project.org/. 
#' } #' #' @@ -46,6 +46,6 @@ shp2cat <- function(filePath) { } if (length(folderName) == 0) stop('No shape file found, make sure the shp file is selected.') - catchment <- readOGR(folderName, catName1) + catchment <- st_read(folderName, catName1) return(catchment) } diff --git a/R/startup.R b/R/startup.R index 5e3ad52..28a46f0 100644 --- a/R/startup.R +++ b/R/startup.R @@ -2,7 +2,7 @@ #' @importFrom utils packageDescription hyfoUpdates <- function(){ - page <- readLines('http://yuanchao-xu.github.io/hyfo/') + page <- readLines('https://yuanchao-xu.github.io/hyfo/') updatesLine <- grep('id=\\"updates"', page) versionLine <- updatesLine + 2 @@ -27,7 +27,7 @@ hyfoUpdates <- function(){ if (!is.na(info_line)) info_msg <- c(info_msg, info_line) } - install_msg <- 'More details on http://yuanchao-xu.github.io/hyfo/' + install_msg <- 'More details on https://yuanchao-xu.github.io/hyfo/' message_out <- paste(version_msg, paste(info_msg, collapse = '\n'), install_msg, sep = '\n') } else message_out <- NULL diff --git a/README.md b/README.md index dac2176..d4dea0c 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # hyfo -[![Travis-CI Build Status](https://travis-ci.org/Yuanchao-Xu/hyfo.svg?branch=master)](https://travis-ci.org/Yuanchao-Xu/hyfo) -[![CRAN_Status_Badge](http://www.r-pkg.org/badges/version/hyfo)](http://cran.r-project.org/package=hyfo) + +[![CRAN_Status_Badge](https://www.r-pkg.org/badges/version/hyfo)](https://cran.r-project.org/package=hyfo) ## Installation @@ -18,7 +18,7 @@ install.packages("devtools") devtools::install_github("Yuanchao-Xu/hyfo") ``` -**Official Website is [http://yuanchao-xu.github.io/hyfo](http://yuanchao-xu.github.io/hyfo)** +**Official Website is [https://yuanchao-xu.github.io/hyfo/](https://yuanchao-xu.github.io/hyfo/)** hyfo is an R package, initially designed for the European Project EUPORIAS, and cooperated with DHI Denmark, which was then extended to other uses in hydrology, hydraulics and climate. 
@@ -26,9 +26,8 @@ This package mainly focuses on data process and visulization in hydrology and cl **If you feel hyfo is of a little help, please cite it as following:** -Xu, Yuanchao(2015). hyfo: Hydrology and Climate Forecasting R Package for Data Analysis and Visualization. Retrieved from http://yuanchao-xu.github.io/hyfo/ +Xu, Yuanchao(2020). hyfo: Hydrology and Climate Forecasting R Package for Data Analysis and Visualization. Retrieved from https://yuanchao-xu.github.io/hyfo/ -[Author in this corner](https://dk.linkedin.com/in/xuyuanchao37) diff --git a/cran-comments.md b/cran-comments.md new file mode 100644 index 0000000..858617d --- /dev/null +++ b/cran-comments.md @@ -0,0 +1,5 @@ +## R CMD check results + +0 errors | 0 warnings | 1 note + +* This is a new release. diff --git a/data/testCat.rda b/data/testCat.rda index 75fa187..4dffbaf 100644 Binary files a/data/testCat.rda and b/data/testCat.rda differ diff --git a/man/applyBiasFactor.Rd b/man/applyBiasFactor.Rd index f236ddc..8183ff1 100644 --- a/man/applyBiasFactor.Rd +++ b/man/applyBiasFactor.Rd @@ -1,6 +1,5 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/multi-biasCorrect(generic).R -\docType{methods} \name{applyBiasFactor} \alias{applyBiasFactor} \alias{applyBiasFactor,data.frame,biasFactor-method} @@ -14,13 +13,13 @@ applyBiasFactor(frc, biasFactor, obs = NULL) \S4method{applyBiasFactor}{list,biasFactor.hyfo}(frc, biasFactor, obs = NULL) } \arguments{ -\item{frc}{a hyfo grid data output or a dataframe(time series) consists of Date column and one or more value columns, +\item{frc}{a hyfo grid data output or a dataframe(time series) consists of Date column and one or more value columns, representing the frc data. 
Check details for more information.} \item{biasFactor}{a file containing all the information of the calibration, will be applied to different forecasts.} -\item{obs}{for some methods, observation input is necessary. obs is a hyfo grid data output or a dataframe (time series) consists of Date column and one or more value columns, +\item{obs}{for some methods, observation input is necessary. obs is a hyfo grid data output or a dataframe (time series) consists of Date column and one or more value columns, representing the observation data. Default value is NULL.} } \description{ @@ -28,7 +27,7 @@ When you do multi/operational/real time bias correction. It's too expensive to input hindcast and obs every time. Especially when you have a long period of hindcast and obs, but only a short period of frc, it's too unecessary to read and compute hindcast and obs everytime. Therefore, biasFactor is designed. Using \code{getBiasFactor}, you can -get the biasFactor with hindcast and observation, then you can use \code{applyBiasFactor} to +get the biasFactor with hindcast and observation, then you can use \code{applyBiasFactor} to apply the biasFactor to different forecasts. } \details{ @@ -42,40 +41,35 @@ period of hindcast and obs, and apply that factor to different frc. For example, -You have 10 years of hindcast and observation. you want to do bias correction for some +You have 10 years of hindcast and observation. you want to do bias correction for some forecasting product, e.g. system 4. For system 4, each month, you will get a new forecast about the future 6 months. So if you want to do the real time bias correction, you have to take the 10 years of hindcast and observation data with you, and run \code{biasCorrect} every time you get a new forecast. That's too expensive. For some practical use in forecasting, there isn't a so high demand for accuracy. E.g., -Maybe for February and March, you can use the same biasFactor, no need to do the computation -again. 
+Maybe for February and March, you can use the same biasFactor, no need to do the computation +again. -It is a generic function, if in your case you need to debug, please see \code{?debug()} +It is a generic function, if in your case you need to debug, please see \code{?debug()} for how to debug S4 method. } -\section{Methods (by class)}{ -\itemize{ -\item \code{frc = data.frame,biasFactor = biasFactor}: - -\item \code{frc = list,biasFactor = biasFactor.hyfo}: -}} \examples{ + ######## hyfo grid file biascorrection ######## -# If your input is obtained by \\code{loadNcdf}, you can also directly biascorrect +# If your input is obtained by \code{loadNcdf}, you can also directly biascorrect # the file. # First load ncdf file. filePath <- system.file("extdata", "tnc.nc", package = "hyfo") -varname <- getNcdfVar(filePath) +varname <- getNcdfVar(filePath) nc <- loadNcdf(filePath, varname) data(tgridData) -#' # Since the example data, has some NA values, the process will include some warning #message, +#' # Since the example data, has some NA values, the process will include some warning #message, # which can be ignored in this case. @@ -85,14 +79,14 @@ data(tgridData) biasFactor <- getBiasFactor(nc, tgridData) newFrc <- applyBiasFactor(nc, biasFactor) - + biasFactor <- getBiasFactor(nc, tgridData, method = 'eqm', extrapolate = 'constant', preci = TRUE) # This method needs obs input. newFrc <- applyBiasFactor(nc, biasFactor, obs = tgridData) biasFactor <- getBiasFactor(nc, tgridData, method = 'gqm', preci = TRUE) -newFrc <- applyBiasFactor(nc, biasFactor) +newFrc <- applyBiasFactor(nc, biasFactor) ######## Time series biascorrection @@ -122,12 +116,12 @@ frc_new <- applyBiasFactor(frc, biasFactor) biasFactor <- getBiasFactor(hindcast, obs, preci = TRUE) frc_new1 <- applyBiasFactor(frc, biasFactor) -# You can use other methods to biascorrect, e.g. delta method. +# You can use other methods to biascorrect, e.g. delta method. 
biasFactor <- getBiasFactor(hindcast, obs, method = 'delta') # delta method needs obs input. frc_new2 <- applyBiasFactor(frc, biasFactor, obs = obs) -# +# biasFactor <- getBiasFactor(hindcast, obs, method = 'eqm', preci = TRUE) # eqm needs obs input frc_new3 <- applyBiasFactor(frc, biasFactor, obs = obs) @@ -144,19 +138,18 @@ plotTS(list = TSlist, plot = 'cum') # If the forecasts you extracted only has incontinuous data for certain months and years, e.g., -# for seasonal forecasting, forecasts only provide 3-6 months data, so the case can be +# for seasonal forecasting, forecasts only provide 3-6 months data, so the case can be # for example Dec, Jan and Feb of every year from year 1999-2005. # In such case, you need to extract certain months and years from observed time series. # extractPeriod() can be then used. + + +# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ -# More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ -} -\author{ -Yuanchao Xu \email{xuyuanchao37@gmail.com } } \references{ Bias correction methods come from \code{biasCorrection} from \code{dowscaleR} @@ -176,7 +169,9 @@ package version 0.6-0. https://github.com/SantanderMetGroup/downscaleR/wiki } } \seealso{ -\code{\link{biasCorrect}} for method used in bias correction. +\code{\link{biasCorrect}} for method used in bias correction. \code{\link{getBiasFactor}}, for the first part. 
} - +\author{ +Yuanchao Xu \email{xuyuanchao37@gmail.com } +} diff --git a/man/biasCorrect.Rd b/man/biasCorrect.Rd index 8df832d..e764fc3 100644 --- a/man/biasCorrect.Rd +++ b/man/biasCorrect.Rd @@ -1,45 +1,68 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/biasCorrect(generic).R -\docType{methods} \name{biasCorrect} \alias{biasCorrect} \alias{biasCorrect,data.frame,data.frame,data.frame-method} \alias{biasCorrect,list,list,list-method} \title{Biascorrect the input timeseries or hyfo dataset} \usage{ -biasCorrect(frc, hindcast, obs, method = "scaling", scaleType = "multi", - preci = FALSE, prThreshold = 0, extrapolate = "no") - -\S4method{biasCorrect}{data.frame,data.frame,data.frame}(frc, hindcast, obs, - method = "scaling", scaleType = "multi", preci = FALSE, - prThreshold = 0, extrapolate = "no") - -\S4method{biasCorrect}{list,list,list}(frc, hindcast, obs, method = "scaling", - scaleType = "multi", preci = FALSE, prThreshold = 0, - extrapolate = "no") +biasCorrect( + frc, + hindcast, + obs, + method = "scaling", + scaleType = "multi", + preci = FALSE, + prThreshold = 0, + extrapolate = "no" +) + +\S4method{biasCorrect}{data.frame,data.frame,data.frame}( + frc, + hindcast, + obs, + method = "scaling", + scaleType = "multi", + preci = FALSE, + prThreshold = 0, + extrapolate = "no" +) + +\S4method{biasCorrect}{list,list,list}( + frc, + hindcast, + obs, + method = "scaling", + scaleType = "multi", + preci = FALSE, + prThreshold = 0, + extrapolate = "no" +) } \arguments{ -\item{frc}{a hyfo grid data output or a dataframe (time series) consists of Date column and one or more value columns, +\item{frc}{a hyfo grid data output or a dataframe (time series) consists of Date column and one or more value columns, representing the forecast to be calibrated.} -\item{hindcast}{a hyfo grid data output or a dataframe(time series) consists of Date column and one or more value columns, 
+\item{hindcast}{a hyfo grid data output or a dataframe(time series) consists of Date column and one or more value columns, representing the hindcast data. This data will be used in the calibration of the forecast, so it's better to have the same date period as observation data. Check details for more information.} -\item{obs}{a hyfo grid data output or a dataframe (time series) consists of Date column and one or more value columns, +\item{obs}{a hyfo grid data output or a dataframe (time series) consists of Date column and one or more value columns, representing the observation data.} \item{method}{bias correct method, including 'delta', 'scaling'..., default is 'scaling'} \item{scaleType}{only when the method "scaling" is chosen, scaleType will be available. Two different types -of scaling method, 'add' and 'multi', which means additive and multiplicative scaling method. More info check +of scaling method, 'add' and 'multi', which means additive and multiplicative scaling method. More info check details. Default scaleType is 'multi'.} \item{preci}{If the precipitation is biascorrected, then you have to assign \code{preci = TRUE}. Since for -precipitation, some biascorrect methods may not apply to, or some methods are specially for precipitation. +precipitation, some biascorrect methods may not apply to, or some methods are specially for precipitation. Default is FALSE, refer to details.} -\item{prThreshold}{The minimum value that is considered as a non-zero precipitation. Default to 1 (assuming mm).} +\item{prThreshold}{The minimum value that is considered as a non-zero precipitation. Default to 0 (assuming mm). If you want +to use precipitation biascorrect, you should consider carefully how to set this threshold, usually is 1. But you +can try with different numbers to see the results.} \item{extrapolate}{When use 'eqm' method, and 'no' is set, modified frc is bounded by the range of obs. If 'constant' is set, modified frc is not bounded by the range of obs. 
Default is 'no'.} @@ -47,7 +70,7 @@ If 'constant' is set, modified frc is not bounded by the range of obs. Default i \description{ Biascorrect the input time series or dataset, the input time series or dataset should consist of observation, hindcast, and forecast. observation and hindcast should belong to the same period, in order to calibrate. Then the modified forecast -will be returned. If the input is a time series, first column should be date column and rest columns should be +will be returned. If the input is a time series, first column should be date column and rest columns should be the value column. If the input is a hyfo dataset, the dataset should be the result of \code{loadNcdf}, or a list file with the same format. } @@ -58,8 +81,8 @@ some bias, biascorrection is used then to fix the bias. \strong{Hindcast} In order to bias correct, we need to pick up some data from the forecast to train with -the observation, which is called hindcast in this function. Using hindcast and observation, -the program can analyze the bias and correct the bias in the forecast. +the observation, which is called hindcast in this function. Using hindcast and observation, +the program can analyze the bias and correct the bias in the forecast. Hindcast should have \strong{EVERY} attributes that forecast has. @@ -71,16 +94,16 @@ Hindcast can be the same as forecast, i.e., you can use forecast itself as hindc \strong{How it works} -Forecast product has to be calibrated, usually the system is doing forecast in real time. So, e.g., if the -forecast starts from year 2000, assuming you are in year 2003, then you will have 3 years' hindcast +Forecast product has to be calibrated, usually the system is doing forecast in real time. So, e.g., if the +forecast starts from year 2000, assuming you are in year 2003, then you will have 3 years' hindcast data (year 2000-2003), which can be used to calibrate. And your forecast period is (2003-2004) -E.g. 
you have observation from 2001-2002, this is your input obs. Then you can take the same +E.g. you have observation from 2001-2002, this is your input obs. Then you can take the same period (2001-2002) from the forecast, which is the hindcast period. For forecast, you can take any period. -The program will evaluate the obs and hindcast, to get the modification of the forecast, and then add the +The program will evaluate the obs and hindcast, to get the modification of the forecast, and then add the modification to the forecast data. -The more categorized input, the more accurate result you will get. E.g., if you want to +The more categorized input, the more accurate result you will get. E.g., if you want to bias correct a forecast for winter season. So you'd better to extract all the winter period in the hindcast and observation to train. \code{extractPeriod} can be used for this purpose. @@ -93,60 +116,55 @@ designed for rainfall data, so \code{preci = TRUE} needs to be set. \strong{delta} -This method consists on adding to the observations the mean change signal (delta method). +This method consists on adding to the observations the mean change signal (delta method). This method is applicable to any kind of variable but it is preferable to avoid it for bounded variables - (e.g. precipitation, wind speed, etc.) because values out of the variable range could be obtained + (e.g. precipitation, wind speed, etc.) because values out of the variable range could be obtained (e.g. negative wind speeds...) - - \strong{scaling} - -This method consists on scaling the simulation with the difference (additive) or quotient (multiplicative) + +\strong{scaling} + +This method consists on scaling the simulation with the difference (additive) or quotient (multiplicative) between the observed and simulated means in the train period. The \code{additive} or \code{multiplicative} correction is defined by parameter \code{scaling.type} (default is \code{additive}). 
-The additive version is preferably applicable to unbounded variables (e.g. temperature) -and the multiplicative to variables with a lower bound (e.g. precipitation, because it also preserves the frequency). - +The additive version is preferably applicable to unbounded variables (e.g. temperature) +and the multiplicative to variables with a lower bound (e.g. precipitation, because it also preserves the frequency). + \strong{eqm} - -Empirical Quantile Mapping. This is a very extended bias correction method which consists on calibrating the simulated Cumulative Distribution Function (CDF) -by adding to the observed quantiles both the mean delta change and the individual delta changes in the corresponding quantiles. + +Empirical Quantile Mapping. This is a very extended bias correction method which consists on calibrating the simulated Cumulative Distribution Function (CDF) +by adding to the observed quantiles both the mean delta change and the individual delta changes in the corresponding quantiles. This method is applicable to any kind of variable. It can keep the extreme value, if you choose constant extrapolation method. But then you will face the risk that the extreme value is an error. - + \strong{gqm} - + Gamma Quantile Mapping. This method is described in Piani et al. 2010 and is applicable only to precipitation. It is based on the initial assumption that both observed -and simulated intensity distributions are well approximated by the gamma distribution, therefore is a parametric q-q map -that uses the theorical instead of the empirical distribution. - +and simulated intensity distributions are well approximated by the gamma distribution, therefore is a parametric q-q map +that uses the theorical instead of the empirical distribution. + It can somehow filter some extreme values caused by errors, while keep the extreme value. Seems more reasonable. Better have a long period of training, and the if the forecast system is relatively stable. 
-It is a generic function, if in your case you need to debug, please see \code{?debug()} +It is a generic function, if in your case you need to debug, please see \code{?debug()} for how to debug S4 method. } -\section{Methods (by class)}{ -\itemize{ -\item \code{frc = data.frame,hindcast = data.frame,obs = data.frame}: - -\item \code{frc = list,hindcast = list,obs = list}: -}} \examples{ + ######## hyfo grid file biascorrection ######## -# If your input is obtained by \\code{loadNcdf}, you can also directly biascorrect +# If your input is obtained by \code{loadNcdf}, you can also directly biascorrect # the file. # First load ncdf file. filePath <- system.file("extdata", "tnc.nc", package = "hyfo") -varname <- getNcdfVar(filePath) +varname <- getNcdfVar(filePath) nc <- loadNcdf(filePath, varname) data(tgridData) -# Since the example data, has some NA values, the process will include some warning #message, +# Since the example data, has some NA values, the process will include some warning #message, # which can be ignored in this case. @@ -154,11 +172,11 @@ data(tgridData) # Then we will use nc data as forecasting data, and use itself as hindcast data, # use tgridData as observation. -newFrc <- biasCorrect(nc, nc, tgridData) -newFrc <- biasCorrect(nc, nc, tgridData, scaleType = 'add') -newFrc <- biasCorrect(nc, nc, tgridData, method = 'eqm', extrapolate = 'constant', -preci = TRUE) -newFrc <- biasCorrect(nc, nc, tgridData, method = 'gqm', preci = TRUE) +newFrc <- biasCorrect(nc, nc, tgridData) +newFrc <- biasCorrect(nc, nc, tgridData, scaleType = 'add') +newFrc <- biasCorrect(nc, nc, tgridData, method = 'eqm', extrapolate = 'constant', +preci = TRUE) +newFrc <- biasCorrect(nc, nc, tgridData, method = 'gqm', preci = TRUE) ######## Time series biascorrection @@ -189,7 +207,7 @@ frc_new1 <- biasCorrect(frc, hindcast, obs, preci = TRUE) # You can use other scaling methods to biascorrect. 
frc_new2 <- biasCorrect(frc, hindcast, obs, scaleType = 'add') -# +# frc_new3 <- biasCorrect(frc, hindcast, obs, method = 'eqm', preci = TRUE) frc_new4 <- biasCorrect(frc, hindcast, obs, method = 'gqm', preci = TRUE) @@ -202,19 +220,18 @@ plotTS(list = TSlist, plot = 'cum') # If the forecasts you extracted only has incontinuous data for certain months and years, e.g., -# for seasonal forecasting, forecasts only provide 3-6 months data, so the case can be +# for seasonal forecasting, forecasts only provide 3-6 months data, so the case can be # for example Dec, Jan and Feb of every year from year 1999-2005. # In such case, you need to extract certain months and years from observed time series. # extractPeriod() can be then used. + + +# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ -# More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ -} -\author{ -Yuanchao Xu \email{xuyuanchao37@gmail.com } } \references{ Bias correction methods come from \code{biasCorrection} from \code{dowscaleR} @@ -233,4 +250,6 @@ package version 0.6-0. https://github.com/SantanderMetGroup/downscaleR/wiki \item O. Gutjahr and G. 
Heinemann (2013) Comparing precipitation bias correction methods for high-resolution regional climate simulations using COSMO-CLM, Theoretical and Applied Climatology, 114, 511-529 } } - +\author{ +Yuanchao Xu \email{xuyuanchao37@gmail.com } +} diff --git a/man/biasFactor-class.Rd b/man/biasFactor-class.Rd index dfb5b4b..6464aad 100644 --- a/man/biasFactor-class.Rd +++ b/man/biasFactor-class.Rd @@ -1,4 +1,4 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/classes.R \docType{class} \name{biasFactor-class} diff --git a/man/biasFactor.hyfo-class.Rd b/man/biasFactor.hyfo-class.Rd index a27d64a..eae58c7 100644 --- a/man/biasFactor.hyfo-class.Rd +++ b/man/biasFactor.hyfo-class.Rd @@ -1,4 +1,4 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/classes.R \docType{class} \name{biasFactor.hyfo-class} diff --git a/man/checkBind.Rd b/man/checkBind.Rd index fd3242f..914bfdb 100644 --- a/man/checkBind.Rd +++ b/man/checkBind.Rd @@ -1,4 +1,4 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/check.R \name{checkBind} \alias{checkBind} @@ -7,7 +7,7 @@ checkBind(data, bind) } \arguments{ -\item{data}{A list containing different sublists ready to be processed by \code{do.call('rbind')} +\item{data}{A list containing different sublists ready to be processed by \code{do.call('rbind')} or \code{do.call('cbind')}} \item{bind}{A string showing which bind you are going to use can be 'rbind' or 'cbind'} @@ -30,4 +30,3 @@ checkBind(testdl, 'rbind') # Since the colnames in testdl are not the same, so it cannot be bound. 
# } - diff --git a/man/collectData.Rd b/man/collectData.Rd index ca599db..5369824 100644 --- a/man/collectData.Rd +++ b/man/collectData.Rd @@ -1,4 +1,4 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/collectData.R \name{collectData} \alias{collectData} @@ -11,7 +11,7 @@ collectData(folderName, fileType = NULL, range = NULL, sheetIndex = 1) \item{fileType}{A string showing the file type, e.g. "txt", "csv", "excel".} -\item{range}{A vector containing startRow, endRow, startColumn, endColumn, e.g., +\item{range}{A vector containing startRow, endRow, startColumn, endColumn, e.g., c(2,15,2,3)} \item{sheetIndex}{A number showing the sheetIndex in the excel file, if fileType is excel, @@ -24,6 +24,7 @@ The collected data from different files in the folder. Collect data from different csv files. } \examples{ + #use internal data as an example. folder <- file.path(path.package("hyfo"), 'extdata') # file may vary with different environment, it if doesn't work, use local way to get @@ -31,6 +32,6 @@ folder <- file.path(path.package("hyfo"), 'extdata') a <- collectData(folder, fileType = 'csv', range = c(10, 20, 1,2)) -# More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ -} +# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ +} diff --git a/man/collectData_csv_anarbe.Rd b/man/collectData_csv_anarbe.Rd index 07c6313..ea3fa58 100644 --- a/man/collectData_csv_anarbe.Rd +++ b/man/collectData_csv_anarbe.Rd @@ -1,4 +1,4 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/case_anarbe.R \name{collectData_csv_anarbe} \alias{collectData_csv_anarbe} @@ -21,18 +21,19 @@ The collected data from different csv files. Collect data from the gauging stations in spain, catchement Anarbe } \examples{ + #use internal data as an example. 
file <- system.file("extdata", "1999.csv", package = "hyfo") folder <- strsplit(file, '1999')[[1]][1] a <- collectData_csv_anarbe(folder) -# More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ + } \references{ \itemize{ \item http://meteo.navarra.es/estaciones/mapadeestaciones.cfm \item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for -Statistical Computing, Vienna, Austria. URL http://www.R-project.org/. +Statistical Computing, Vienna, Austria. URL https://www.R-project.org/. } } - diff --git a/man/collectData_excel_anarbe.Rd b/man/collectData_excel_anarbe.Rd index 4e05c1b..d9ca391 100644 --- a/man/collectData_excel_anarbe.Rd +++ b/man/collectData_excel_anarbe.Rd @@ -1,4 +1,4 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/case_anarbe.R \name{collectData_excel_anarbe} \alias{collectData_excel_anarbe} @@ -25,4 +25,3 @@ Collect data from different excel files Statistical Computing, Vienna, Austria. URL http://www.R-project.org/. 
} } - diff --git a/man/collectData_txt_anarbe.Rd b/man/collectData_txt_anarbe.Rd index 9fed426..1a72969 100644 --- a/man/collectData_txt_anarbe.Rd +++ b/man/collectData_txt_anarbe.Rd @@ -1,4 +1,4 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/case_anarbe.R \name{collectData_txt_anarbe} \alias{collectData_txt_anarbe} @@ -7,15 +7,18 @@ http://www4.gipuzkoa.net/oohh/web/esp/02.asp } \usage{ -collectData_txt_anarbe(folderName, output = TRUE, - rangeWord = c("Ene ", -1, "Total ", -6)) +collectData_txt_anarbe( + folderName, + output = TRUE, + rangeWord = c("Ene ", -1, "Total ", -6) +) } \arguments{ \item{folderName}{A string showing the folder path.} \item{output}{A boolean showing whether the result is given.} -\item{rangeWord}{A list containing the keyword and the shift. +\item{rangeWord}{A list containing the keyword and the shift. defaut is set to be used in spain gauging station.} } \value{ @@ -25,6 +28,7 @@ The collected data from different txt files. collect data from different txt. } \examples{ + #use internal data as an example. \dontrun{ @@ -34,13 +38,13 @@ a <- collectData_txt_anarbe(folder) } -# More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ + } \references{ \itemize{ \item http://www4.gipuzkoa.net/oohh/web/esp/02.asp \item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for -Statistical Computing, Vienna, Austria. URL http://www.R-project.org/. +Statistical Computing, Vienna, Austria. URL https://www.R-project.org/. 
} } - diff --git a/man/coord2cell.Rd b/man/coord2cell.Rd index d2e9e1f..3cdd597 100644 --- a/man/coord2cell.Rd +++ b/man/coord2cell.Rd @@ -1,4 +1,4 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/coordinate.R \name{coord2cell} \alias{coord2cell} @@ -19,4 +19,3 @@ A cell coordinate \description{ Change lon lat coordinates to cell coordinates } - diff --git a/man/downscaleNcdf.Rd b/man/downscaleNcdf.Rd index 1630c1a..eea1e9f 100644 --- a/man/downscaleNcdf.Rd +++ b/man/downscaleNcdf.Rd @@ -1,11 +1,10 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ncdf.R \name{downscaleNcdf} \alias{downscaleNcdf} \title{Downscale NetCDF file} \usage{ -downscaleNcdf(gridData, year = NULL, month = NULL, lon = NULL, - lat = NULL) +downscaleNcdf(gridData, year = NULL, month = NULL, lon = NULL, lat = NULL) } \arguments{ \item{gridData}{A hyfo list file from \code{\link{loadNcdf}}} @@ -31,7 +30,7 @@ Downscale NetCDF file filePath <- system.file("extdata", "tnc.nc", package = "hyfo") -# Then if you don't know the variable name, you can use \\code{getNcdfVar} to get variable name +# Then if you don't know the variable name, you can use \code{getNcdfVar} to get variable name varname <- getNcdfVar(filePath) nc <- loadNcdf(filePath, varname) @@ -41,7 +40,8 @@ nc <- loadNcdf(filePath, varname) nc1 <- downscaleNcdf(nc, year = 2006, lon = c(-2, -0.5), lat = c(43.2, 43.7)) nc2 <- downscaleNcdf(nc, year = 2005, month = 3:8, lon = c(-2, -0.5), lat = c(43.2, 43.7)) -# More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ + } \references{ \itemize{ @@ -50,4 +50,3 @@ nc2 <- downscaleNcdf(nc, year = 2005, month = 3:8, lon = c(-2, -0.5), lat = c(43 version 2.2-6. 
http://meteo.unican.es/ecoms-udg } } - diff --git a/man/extractPeriod.Rd b/man/extractPeriod.Rd index 604905a..b237b1c 100644 --- a/man/extractPeriod.Rd +++ b/man/extractPeriod.Rd @@ -1,20 +1,37 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/extractPeriod(generic).R -\docType{methods} \name{extractPeriod} \alias{extractPeriod} \alias{extractPeriod,data.frame-method} \alias{extractPeriod,list-method} \title{Extract period from list or dataframe.} \usage{ -extractPeriod(data, startDate = NULL, endDate = NULL, - commonPeriod = FALSE, year = NULL, month = NULL) - -\S4method{extractPeriod}{data.frame}(data, startDate = NULL, endDate = NULL, - commonPeriod = FALSE, year = NULL, month = NULL) - -\S4method{extractPeriod}{list}(data, startDate = NULL, endDate = NULL, - commonPeriod = FALSE, year = NULL, month = NULL) +extractPeriod( + data, + startDate = NULL, + endDate = NULL, + commonPeriod = FALSE, + year = NULL, + month = NULL +) + +\S4method{extractPeriod}{data.frame}( + data, + startDate = NULL, + endDate = NULL, + commonPeriod = FALSE, + year = NULL, + month = NULL +) + +\S4method{extractPeriod}{list}( + data, + startDate = NULL, + endDate = NULL, + commonPeriod = FALSE, + year = NULL, + month = NULL +) } \arguments{ \item{data}{A list of different dataframes of time series, or a dataframe with first column Date, the rest columns value.} @@ -28,21 +45,21 @@ should be NULL.} \item{year}{extract certain year in the entire time series. if you want to extract year 2000, set \code{year = 2000}} -\item{month}{extract certain months in a year. e.g. if you want to extract Jan, Feb of each year, +\item{month}{extract certain months in a year. e.g. if you want to extract Jan, Feb of each year, set \code{month = c(1, 2)}.} } \value{ A list or a dataframe with all the time series inside containing the same period. 
} \description{ -Extract common period or certain period from a list of different dataframes of time series, or from a -dataframe. +Extract common period or certain period from a list of different dataframes of time series, or from a +dataframe. NOTE: all the dates in the datalist should follow the format in ?as.Date{base}. } \details{ \strong{startDate and endDate} -If startDate and endDate are assigned, then certain period between startDate and endDate will be returned, +If startDate and endDate are assigned, then certain period between startDate and endDate will be returned, for both datalist input and dataframe input. If startDate and endDate are NOT assigned, then, @@ -51,26 +68,20 @@ If startDate and endDate are NOT assigned, then, to the startDate and endDate. if input is a dataframe, the startDate and endDate of the input dataframe will be assigned to the startDate - and endDate . Since different value columns share a common Date column in a dataframe input. + and endDate . Since different value columns share a common Date column in a dataframe input. \strong{year and month} For year crossing month input, hyfo will take from the year before. E.g. if \code{month = c(10, 11, 12, 1)}, -and \code{year = 1999}, hyfo will take month 10, 11 and 12 from year 1998, and month 1 from 1999.You DO NOT +and \code{year = 1999}, hyfo will take month 10, 11 and 12 from year 1998, and month 1 from 1999.You DO NOT have to set \code{year = 1998 : 1999}. Well, if you set \code{year = 1998 : 1999}, hyfo will take month 10, 11 and 12 from year 1997, and month 1 from 1998, then, take month 10, 11 and 12 from year 1998, month 1 from 1999. So you only have to care about the latter year. -It is a generic function, if in your case you need to debug, please see \code{?debug()} +It is a generic function, if in your case you need to debug, please see \code{?debug()} for how to debug S4 method. 
} -\section{Methods (by class)}{ -\itemize{ -\item \code{data.frame}: - -\item \code{list}: -}} \examples{ # Generate timeseries datalist. Each data frame consists of a Date and a value. @@ -81,12 +92,12 @@ Date = seq(as.Date('1990-10-28'),as.Date('1997-4-1'),1), AAA = sample(1:100,length(seq(as.Date('1990-10-28'),as.Date('1997-4-1'),1)), repl = TRUE)) BBB <- data.frame( -Date = seq(as.Date('1993-3-28'),as.Date('1999-1-1'),1), +Date = seq(as.Date('1993-3-28'),as.Date('1999-1-1'),1), BBB = sample(1:100,length(seq(as.Date('1993-3-28'),as.Date('1999-1-1'),1)), repl = TRUE)) - + CCC <- data.frame( -Date = seq(as.Date('1988-2-2'),as.Date('1996-1-1'),1), -CCC = sample(1:100,length(seq(as.Date('1988-2-2'),as.Date('1996-1-1'),1)), repl = TRUE)) +Date = seq(as.Date('1988-2-2'),as.Date('1996-1-1'),1), +CCC = sample(1:100,length(seq(as.Date('1988-2-2'),as.Date('1996-1-1'),1)), repl = TRUE)) list <- list(AAA, BBB, CCC)# dput() and dget() can be used to save and load list file. @@ -96,7 +107,7 @@ list_com <- extractPeriod(list, commonPeriod = TRUE) str(list_com) # If startDate and endDate is provided, the record between them will be extracted. -# make sure startDate is later than any startDate in each dataframe and endDate is +# make sure startDate is later than any startDate in each dataframe and endDate is # earlier than any endDate in each dataframe. data(testdl) @@ -109,12 +120,12 @@ dataframe_new <- extractPeriod(dataframe, month = c(1,2,3)) dataframe_new <- extractPeriod(dataframe, month = c(12,1,2), year = 1995) -# More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ + } \references{ \itemize{ \item Achim Zeileis and Gabor Grothendieck (2005). zoo: S3 Infrastructure for Regular and Irregular Time -Series. Journal of Statistical Software, 14(6), 1-27. URL http://www.jstatsoft.org/v14/i06/ +Series. Journal of Statistical Software, 14(6), 1-27. 
URL https://www.jstatsoft.org/v14/i06/ } } - diff --git a/man/fillGap.Rd b/man/fillGap.Rd index 5c081fb..d682b39 100644 --- a/man/fillGap.Rd +++ b/man/fillGap.Rd @@ -1,4 +1,4 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fillGap.R \name{fillGap} \alias{fillGap} @@ -9,7 +9,7 @@ fillGap(dataset, corPeriod = "daily") \arguments{ \item{dataset}{A dataframe with first column the time, the rest columns are rainfall data of different gauges} -\item{corPeriod}{A string showing the period used in the correlation computing, +\item{corPeriod}{A string showing the period used in the correlation computing, e.g. daily, monthly, yearly.} } \value{ @@ -23,21 +23,21 @@ the gap filler follows the rules below: 1. The correlation coefficient of every two columns (except time column) is calculated. the correlation coefficient calculation can be based on 'daily', 'monthly', 'annual', -in each case, the daily data, the monthly mean daily data and annual mean daily data of +in each case, the daily data, the monthly mean daily data and annual mean daily data of each column will be taken in the correlation calculation. -Then the correlation matrix is got, then based on the matrix, for each column, +Then the correlation matrix is got, then based on the matrix, for each column, the 1st, 2nd, 3rd,... correlated column will be got. So if there is missing value in the column, it will get data from orderly 1st, 2nd, 3rd column. 2. The simple linear regress is calculated between every two columns. When generating the - linear coefficient, the incept should be force to 0. i.e. y = a*x + b should be forec to + linear coefficient, the incept should be force to 0. i.e. y = a*x + b should be forec to y = a*x. - + 3. Gap filling. 
E.g., on a certain date, there is a missing value in column A, then the correlation order is column B, column C, column D, which means A should take values from B firstly, if B is also missing data, then C, then D. - + Assuming finally value from column C is taken. Then according to step 2, A = a*C, then the final value filled in column A is missing_in_A = a*value_in_C, a is the linear coeffcient. } @@ -61,7 +61,8 @@ a2 <- fillGap(a1) a3 <- fillGap(a1, corPeriod = 'monthly') -# More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ + } \references{ Gap fiiling method based on correlation and linear regression. @@ -72,4 +73,3 @@ Salas, Jose D. "Analysis and modeling of hydrologic time series." Handbook of hy } } - diff --git a/man/getAnnual.Rd b/man/getAnnual.Rd index 7b5623e..47fad7d 100644 --- a/man/getAnnual.Rd +++ b/man/getAnnual.Rd @@ -1,6 +1,5 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/getAnnual(generic).R -\docType{methods} \name{getAnnual} \alias{getAnnual} \alias{getAnnual,data.frame-method} @@ -9,8 +8,7 @@ \usage{ getAnnual(data, output = "series", minRecords = 355, ...) -\S4method{getAnnual}{data.frame}(data, output = "series", minRecords = 355, - ...) +\S4method{getAnnual}{data.frame}(data, output = "series", minRecords = 355, ...) \S4method{getAnnual}{list}(data, output = "series", minRecords = 355, ...) } @@ -20,30 +18,24 @@ gauging stations. Usually an output of \code{list2Dataframe}.} \item{output}{A string showing the output output.} -\item{minRecords}{A number showing the minimum accept record number, e.g. for a normal +\item{minRecords}{A number showing the minimum accept record number, e.g. 
for a normal year(365 days), if \code{minRecords = 360}, it means if a year has less than 360 records -of a year, it will be ignored in the mean annual value calculation. Only valid +of a year, it will be ignored in the mean annual value calculation. Only valid when \code{output = "mean"}, default is 355.} \item{...}{\code{title, x, y} showing the title and x and y axis of the plot. e.g. \code{title = 'aaa'}} } \value{ -The annual rainfall and the number of missing data of each year and each rainfall gauge, which +The annual rainfall and the number of missing data of each year and each rainfall gauge, which will also be plotted. If output "mean" is seleted, the mean annual rainfall will be returned. } \description{ Get annual rainfall of different raninfall time series. } \details{ -It is a generic function, if in your case you need to debug, please see \code{?debug()} +It is a generic function, if in your case you need to debug, please see \code{?debug()} for how to debug S4 method. } -\section{Methods (by class)}{ -\itemize{ -\item \code{data.frame}: - -\item \code{list}: -}} \examples{ #datalist is provided by the package as a test. data(testdl) @@ -60,7 +52,8 @@ a3 <- fillGap(a2) getAnnual(a3) -# More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ + } \references{ \itemize{ @@ -68,7 +61,6 @@ getAnnual(a3) \item Hadley Wickham (2007). Reshaping Data with the reshape Package. Journal of Statistical Software, 21(12), 1-20. URL http://www.jstatsoft.org/v21/i12/. \item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for -Statistical Computing, Vienna, Austria. URL http://www.R-project.org/. +Statistical Computing, Vienna, Austria. URL https://www.R-project.org/. 
} } - diff --git a/man/getAnnual_dataframe.Rd b/man/getAnnual_dataframe.Rd index d0aefd1..ca8955b 100644 --- a/man/getAnnual_dataframe.Rd +++ b/man/getAnnual_dataframe.Rd @@ -1,4 +1,4 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/getAnnual(generic).R \name{getAnnual_dataframe} \alias{getAnnual_dataframe} @@ -16,4 +16,3 @@ The annual rainfall of each year of the input station. \description{ Get annual rainfall of the input time series. } - diff --git a/man/getBiasFactor.Rd b/man/getBiasFactor.Rd index 822e984..19a53bd 100644 --- a/man/getBiasFactor.Rd +++ b/man/getBiasFactor.Rd @@ -1,39 +1,57 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/multi-biasCorrect(generic).R -\docType{methods} \name{getBiasFactor} \alias{getBiasFactor} \alias{getBiasFactor,data.frame,data.frame-method} \alias{getBiasFactor,list,list-method} \title{Get bias factor for multi/operational/real time bias correction.} \usage{ -getBiasFactor(hindcast, obs, method = "scaling", scaleType = "multi", - preci = FALSE, prThreshold = 0, extrapolate = "no") - -\S4method{getBiasFactor}{data.frame,data.frame}(hindcast, obs, - method = "scaling", scaleType = "multi", preci = FALSE, - prThreshold = 0, extrapolate = "no") - -\S4method{getBiasFactor}{list,list}(hindcast, obs, method = "scaling", - scaleType = "multi", preci = FALSE, prThreshold = 0, - extrapolate = "no") +getBiasFactor( + hindcast, + obs, + method = "scaling", + scaleType = "multi", + preci = FALSE, + prThreshold = 0, + extrapolate = "no" +) + +\S4method{getBiasFactor}{data.frame,data.frame}( + hindcast, + obs, + method = "scaling", + scaleType = "multi", + preci = FALSE, + prThreshold = 0, + extrapolate = "no" +) + +\S4method{getBiasFactor}{list,list}( + hindcast, + obs, + method = "scaling", + scaleType = "multi", + preci = FALSE, + prThreshold = 0, + extrapolate = 
"no" +) } \arguments{ -\item{hindcast}{a hyfo grid data output or a dataframe(time series) consists of Date column and one or more value columns, +\item{hindcast}{a hyfo grid data output or a dataframe(time series) consists of Date column and one or more value columns, representing the hindcast data. This data will be used in the calibration of the forecast, so it's better to have the same date period as observation data. Check details for more information.} -\item{obs}{a hyfo grid data output or a dataframe (time series) consists of Date column and one or more value columns, +\item{obs}{a hyfo grid data output or a dataframe (time series) consists of Date column and one or more value columns, representing the observation data.} \item{method}{bias correct method, including 'delta', 'scaling'...,default method is 'scaling'.} \item{scaleType}{only when the method "scaling" is chosen, scaleType will be available. Two different types -of scaling method, 'add' and 'multi', which means additive and multiplicative scaling method, default is 'multi'. More info check +of scaling method, 'add' and 'multi', which means additive and multiplicative scaling method, default is 'multi'. More info check details.} \item{preci}{If the precipitation is biascorrected, then you have to assign \code{preci = TRUE}. Since for -precipitation, some biascorrect methods may not apply to, or some methods are specially for precipitation. +precipitation, some biascorrect methods may not apply to, or some methods are specially for precipitation. Default is FALSE, refer to details.} \item{prThreshold}{The minimum value that is considered as a non-zero precipitation. Default to 1 (assuming mm).} @@ -46,7 +64,7 @@ When you do multi/operational/real time bias correction. It's too expensive to input hindcast and obs every time. Especially when you have a long period of hindcast and obs, but only a short period of frc, it's too unecessary to read and compute hindcast and obs everytime. 
Therefore, biasFactor is designed. Using \code{getBiasFactor}, you can -get the biasFactor with hindcast and observation, then you can use \code{applyBiasFactor} to +get the biasFactor with hindcast and observation, then you can use \code{applyBiasFactor} to apply the biasFactor to different forecasts. } \details{ @@ -60,39 +78,34 @@ period of hindcast and obs, and apply that factor to different frc. For example, -You have 10 years of hindcast and observation. you want to do bias correction for some +You have 10 years of hindcast and observation. you want to do bias correction for some forecasting product, e.g. system 4. For system 4, each month, you will get a new forecast about the future 6 months. So if you want to do the real time bias correction, you have to take the 10 years of hindcast and observation data with you, and run \code{biasCorrect} every time you get a new forecast. That's too expensive. For some practical use in forecasting, there isn't a so high demand for accuracy. E.g., -Maybe for February and March, you can use the same biasFactor, no need to do the computation -again. - -It is a generic function, if in your case you need to debug, please see \code{?debug()} +Maybe for February and March, you can use the same biasFactor, no need to do the computation +again. + +It is a generic function, if in your case you need to debug, please see \code{?debug()} for how to debug S4 method. } -\section{Methods (by class)}{ -\itemize{ -\item \code{hindcast = data.frame,obs = data.frame}: - -\item \code{hindcast = list,obs = list}: -}} \examples{ + ######## hyfo grid file biascorrection ######## -# If your input is obtained by \\code{loadNcdf}, you can also directly biascorrect +# If your input is obtained by \code{loadNcdf}, you can also directly biascorrect # the file. # First load ncdf file. 
filePath <- system.file("extdata", "tnc.nc", package = "hyfo") -varname <- getNcdfVar(filePath) +varname <- getNcdfVar(filePath) nc <- loadNcdf(filePath, varname) data(tgridData) -# Since the example data, has some NA values, the process will include some warning #message, +# Since the example data, has some NA values, the process will include some warning #message, # which can be ignored in this case. @@ -102,14 +115,14 @@ data(tgridData) biasFactor <- getBiasFactor(nc, tgridData) newFrc <- applyBiasFactor(nc, biasFactor) - + biasFactor <- getBiasFactor(nc, tgridData, method = 'eqm', extrapolate = 'constant', preci = TRUE) # This method needs obs input. newFrc <- applyBiasFactor(nc, biasFactor, obs = tgridData) biasFactor <- getBiasFactor(nc, tgridData, method = 'gqm', preci = TRUE) -newFrc <- applyBiasFactor(nc, biasFactor) +newFrc <- applyBiasFactor(nc, biasFactor) ######## Time series biascorrection @@ -139,12 +152,12 @@ frc_new <- applyBiasFactor(frc, biasFactor) biasFactor <- getBiasFactor(hindcast, obs, preci = TRUE) frc_new1 <- applyBiasFactor(frc, biasFactor) -# You can use other methods to biascorrect, e.g. delta method. +# You can use other methods to biascorrect, e.g. delta method. biasFactor <- getBiasFactor(hindcast, obs, method = 'delta') # delta method needs obs input. frc_new2 <- applyBiasFactor(frc, biasFactor, obs = obs) -# +# biasFactor <- getBiasFactor(hindcast, obs, method = 'eqm', preci = TRUE) # eqm needs obs input frc_new3 <- applyBiasFactor(frc, biasFactor, obs = obs) @@ -161,19 +174,18 @@ plotTS(list = TSlist, plot = 'cum') # If the forecasts you extracted only has incontinuous data for certain months and years, e.g., -# for seasonal forecasting, forecasts only provide 3-6 months data, so the case can be +# for seasonal forecasting, forecasts only provide 3-6 months data, so the case can be # for example Dec, Jan and Feb of every year from year 1999-2005. 
# In such case, you need to extract certain months and years from observed time series. # extractPeriod() can be then used. + + +# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ -# More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ -} -\author{ -Yuanchao Xu \email{xuyuanchao37@gmail.com } } \references{ Bias correction methods come from \code{biasCorrection} from \code{dowscaleR} @@ -196,4 +208,6 @@ package version 0.6-0. https://github.com/SantanderMetGroup/downscaleR/wiki \code{\link{biasCorrect}} for method used in bias correction. \code{\link{applyBiasFactor}}, for the second part. } - +\author{ +Yuanchao Xu \email{xuyuanchao37@gmail.com } +} diff --git a/man/getEnsem_comb.Rd b/man/getEnsem_comb.Rd index 3e62f58..5e2e869 100644 --- a/man/getEnsem_comb.Rd +++ b/man/getEnsem_comb.Rd @@ -1,14 +1,22 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/getEnsemble.R \name{getEnsem_comb} \alias{getEnsem_comb} \title{Combine ensembles together} \usage{ -getEnsem_comb(..., list = NULL, nrow = 1, legend = TRUE, x = "", - y = "", title = "", output = FALSE) +getEnsem_comb( + ..., + list = NULL, + nrow = 1, + legend = TRUE, + x = "", + y = "", + title = "", + output = FALSE +) } \arguments{ -\item{...}{different ensembles generated by \code{getHisEnsem(, output = 'ggplot')} +\item{...}{different ensembles generated by \code{getHisEnsem(, output = 'ggplot')} or \code{getFrcEnsem(, output = 'ggplot')}, see details.} \item{list}{If input is a list containing different ggplot data, use \code{list = inputlist}.} @@ -34,6 +42,7 @@ A combined ensemble plot. 
Combine ensembles together } \examples{ + data(testdl) a <- testdl[[1]] @@ -43,14 +52,16 @@ a <- testdl[[1]] b1<- getHisEnsem(a, example = c('1995-2-4', '1996-1-4'), plot = 'cum', output = 'ggplot', name = 1) - + b2 <- getHisEnsem(a, example = c('1995-4-4', '1996-3-4'), plot = 'cum', output = 'ggplot', name = 2) getEnsem_comb(b1, b2) getEnsem_comb(list = list(b1, b2), nrow = 2) -# More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ + + } \references{ \itemize{ @@ -60,4 +71,3 @@ statistical downscaling. R package version 0.6-0. https://github.com/SantanderMetGroup/downscaleR/wiki } } - diff --git a/man/getFrcEnsem.Rd b/man/getFrcEnsem.Rd index f8a063b..e97a179 100644 --- a/man/getFrcEnsem.Rd +++ b/man/getFrcEnsem.Rd @@ -1,11 +1,19 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/getEnsemble.R \name{getFrcEnsem} \alias{getFrcEnsem} \title{Extract time series from forecasting data.} \usage{ -getFrcEnsem(dataset, cell = "mean", plot = "norm", output = "data", - name = NULL, mv = 0, coord = NULL, ...) +getFrcEnsem( + dataset, + cell = "mean", + plot = "norm", + output = "data", + name = NULL, + mv = 0, + coord = NULL, + ... +) } \arguments{ \item{dataset}{A list containing different information, should be the result of \code{\link{loadNcdf}}} @@ -13,10 +21,10 @@ getFrcEnsem(dataset, cell = "mean", plot = "norm", output = "data", \item{cell}{A vector containing the locaton of the cell, e.g. c(2, 3), default is "mean", representing the spatially averaged value. 
Check details for more information.} -\item{plot}{A string showing whether the plot will be shown, e.g., 'norm' means normal plot (without any process), +\item{plot}{A string showing whether the plot will be shown, e.g., 'norm' means normal plot (without any process), 'cum' means cummulative plot, default is 'norm'. For other words there will be no plot.} -\item{output}{A string showing which type of output you want. Default is "data", if "ggplot", the +\item{output}{A string showing which type of output you want. Default is "data", if "ggplot", the data that can be directly plotted by ggplot2 will be returned, which is easier for you to make series plots afterwards. NOTE: If \code{output = 'ggplot'}, the missing value in the data will be replaced by \code{mv}, if assigned, default mv is 0.} @@ -24,7 +32,7 @@ be replaced by \code{mv}, if assigned, default mv is 0.} \item{name}{If \code{output = 'ggplot'}, name has to be assigned to your output, in order to differentiate different outputs in the later multiplot using \code{getEnsem_comb}.} -\item{mv}{A number showing representing the missing value. When calculating the cumulative value, +\item{mv}{A number showing representing the missing value. When calculating the cumulative value, missing value will be replaced by mv, default is 0.} \item{coord}{A coordinate of longitude and latitude. e.g. corrd = c(lon, lat). If coord is assigned, @@ -52,8 +60,9 @@ by the argument directly, If name is not assigned and \code{output = 'ggplot'} i the system time will be selected as name column. 
} \examples{ + filePath <- system.file("extdata", "tnc.nc", package = "hyfo") -# Then if you don't know the variable name, you can use \\code{getNcdfVar} to get variable name +# Then if you don't know the variable name, you can use \code{getNcdfVar} to get variable name varname <- getNcdfVar(filePath) nc <- loadNcdf(filePath, varname) a <- getFrcEnsem(nc) @@ -62,9 +71,9 @@ a <- getFrcEnsem(nc) a1 <- getFrcEnsem(tgridData) -# The default output is spatially averaged, if there are more than one cells in the dataset, -# the mean value of the cells will be calculated. While if you are interested in special cell, -# you can assign the cell value. You can also directly use longitude and latitude to extract +# The default output is spatially averaged, if there are more than one cells in the dataset, +# the mean value of the cells will be calculated. While if you are interested in special cell, +# you can assign the cell value. You can also directly use longitude and latitude to extract # time series. getSpatialMap(nc, 'mean') @@ -75,7 +84,8 @@ a <- getFrcEnsem(nc, cell = c(6,2)) b <- getFrcEnsem(nc, coord = c(-1.4, 43.2)) -# More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ + } \references{ \itemize{ @@ -87,4 +97,3 @@ statistical downscaling. R package version 0.6-0. https://github.com/SantanderMetGroup/downscaleR/wiki } } - diff --git a/man/getHisEnsem.Rd b/man/getHisEnsem.Rd index b0fec84..9997f55 100644 --- a/man/getHisEnsem.Rd +++ b/man/getHisEnsem.Rd @@ -1,34 +1,43 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/getEnsemble.R \name{getHisEnsem} \alias{getHisEnsem} \title{Get ensemble forecast from historical data.} \usage{ -getHisEnsem(TS, example, interval = 365, buffer = 0, plot = "norm", - output = "data", name = NULL, mv = 0, ...) 
+getHisEnsem( + TS, + example, + interval = 365, + buffer = 0, + plot = "norm", + output = "data", + name = NULL, + mv = 0, + ... +) } \arguments{ \item{TS}{A time series dataframe, with first column Date, and second column value.} -\item{example}{A vector containing two strings showing the start and end date, which represent the +\item{example}{A vector containing two strings showing the start and end date, which represent the forecasting period. Check details for more information. -the program will extract every possible period in TS you provided to generate the ensemble. Check details for +the program will extract every possible period in TS you provided to generate the ensemble. Check details for more information.} \item{interval}{A number representing the interval of each ensemble member. NOTE: "interval" takes -365 as a year, and 30 as a month, regardless of leap year and months with 31 days. So if you want the interval -to be 2 years, set \code{interval = 730}, which equals 2 * 365 ; if two months, set \code{interval = 60}; -2 days, \code{interval = 2}, for other numbers that cannot be divided by 365 or 30 without remainder, it will treat the +365 as a year, and 30 as a month, regardless of leap year and months with 31 days. So if you want the interval +to be 2 years, set \code{interval = 730}, which equals 2 * 365 ; if two months, set \code{interval = 60}; +2 days, \code{interval = 2}, for other numbers that cannot be divided by 365 or 30 without remainder, it will treat the number as days.By defualt interval is set to be 365, a year.} \item{buffer}{A number showing how many days are used as buffer period for models. Check details for more information.} -\item{plot}{A string showing whether the plot will be shown, e.g., 'norm' means normal plot (without any process), +\item{plot}{A string showing whether the plot will be shown, e.g., 'norm' means normal plot (without any process), 'cum' means cummulative plot, default is 'norm'. 
For other words there will be no plot.} -\item{output}{A string showing which type of output you want. Default is "data", if "ggplot", the +\item{output}{A string showing which type of output you want. Default is "data", if "ggplot", the data that can be directly plotted by ggplot2 will be returned, which is easier for you to make series plots afterwards. NOTE: If \code{output = 'ggplot'}, the missing value in the data will be replaced by \code{mv}, if assigned, default mv is 0.} @@ -36,7 +45,7 @@ be replaced by \code{mv}, if assigned, default mv is 0.} \item{name}{If \code{output = 'ggplot'}, name has to be assigned to your output, in order to differentiate different outputs in the later multiplot using \code{getEnsem_comb}.} -\item{mv}{A number showing representing the missing value. When calculating the cumulative value, +\item{mv}{A number showing representing the missing value. When calculating the cumulative value, missing value will be replaced by mv, default is 0.} \item{...}{\code{title, x, y} showing the title and x and y axis of the plot. e.g. \code{title = 'aaa'}} @@ -57,7 +66,7 @@ forecasts. In this case your input example should be \code{example = c('2003-2-1 e.g., if the interval is from 1999-2-1 to 1999-3-1, you should just set interval to 30, although the real interval is 28 days. -\code{example} and \code{interval} controls how the ensemble will be generated. e.g. if the time series is from +\code{example} and \code{interval} controls how the ensemble will be generated. e.g. if the time series is from 1990-1-1 to 2001-1-1. if \code{example = c('1992-3-1', '1994-1-1')} and \code{interval = 1095}, note, 1095 = 365 * 3, so the program treat @@ -97,6 +106,7 @@ need a name column to differentiate one ggplot output from the other. 
You can as by the argument directly, name has to be assigned if \code{output = 'ggplot'} is selected, } \examples{ + data(testdl) a <- testdl[[1]] @@ -107,12 +117,14 @@ b <- getHisEnsem(a, example = c('1994-2-4', '1996-1-4')) # Default interval is one year, can be set to other values, check help for information. # Take 7 months as interval -b <- getHisEnsem(a, example = c('1994-2-4', '1996-1-4'), interval = 210, plot = 'cum') +b <- getHisEnsem(a, example = c('1994-2-4', '1996-1-4'), interval = 210, plot = 'cum') # Take 30 days as buffer b <- getHisEnsem(a, example = c('1994-2-4', '1996-1-4'), interval = 210, buffer = 30) -# More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ + + } \references{ \itemize{ @@ -121,4 +133,3 @@ b <- getHisEnsem(a, example = c('1994-2-4', '1996-1-4'), interval = 210, buffer \item H. Wickham. ggplot2: elegant graphics for data analysis. Springer New York, 2009. } } - diff --git a/man/getLMom.Rd b/man/getLMom.Rd index e91ef2f..516e2cc 100644 --- a/man/getLMom.Rd +++ b/man/getLMom.Rd @@ -1,4 +1,4 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/analyzeTS.R \name{getLMom} \alias{getLMom} @@ -19,12 +19,12 @@ get L moment analysis of the input distribution dis <- seq(1, 100) getLMom(dis) -# More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ + } \references{ \itemize{ \item J. R. M. Hosking (2015). L-moments. R package, version 2.5. URL: -http://CRAN.R-project.org/package=lmom. +https://CRAN.R-project.org/package=lmom. 
} } - diff --git a/man/getMeanPreci.Rd b/man/getMeanPreci.Rd index 1e98e68..fb11eb3 100644 --- a/man/getMeanPreci.Rd +++ b/man/getMeanPreci.Rd @@ -1,11 +1,19 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/getMeanPreci.R \name{getMeanPreci} \alias{getMeanPreci} \title{Get mean rainfall data.} \usage{ -getMeanPreci(inputTS, method = NULL, yearIndex = NULL, monthIndex = NULL, - fullResults = FALSE, omitNA = TRUE, plot = FALSE, ...) +getMeanPreci( + inputTS, + method = NULL, + yearIndex = NULL, + monthIndex = NULL, + fullResults = FALSE, + omitNA = TRUE, + plot = FALSE, + ... +) } \arguments{ \item{inputTS}{A time series with only data column (1 column).} @@ -17,14 +25,14 @@ more information please refer to details.} \item{monthIndex}{A NUMERIC ARRAY showing the month index of the time series.} -\item{fullResults}{A boolean showing whether the full results are shown, default is FALSE. If +\item{fullResults}{A boolean showing whether the full results are shown, default is FALSE. If FALSE, only mean value will be returned, if TRUE, the sequence of values will be returned.} \item{omitNA}{A boolean showing in the calculation, whether NA is omitted, default is FALSE.} \item{plot}{A boolean showing whether the results will be plotted.} -\item{...,}{\code{title, x, y} showing the title and x and y axis of the plot, shoud be a string.} +\item{..., }{\code{title, x, y} showing the title and x and y axis of the plot, shoud be a string.} } \value{ The mean value of the input time series or the full results before calculating mean. @@ -33,8 +41,8 @@ The mean value of the input time series or the full results before calculating m Get mean rainfall data, e.g. mean annual rainfall, mean monthly rainfall and mean winter rainfall. } \details{ -There are following methods to be selected, -"annual": annual rainfall of each year is plotted. 
+There are following methods to be selected, +"annual": annual rainfall of each year is plotted. "winter", "spring", "autumn", "summer": seasonal rainfall of each year is plotted. Month(number 1 to 12): month rainfall of each year is plotted, e.g. march rainfall of each year. "meanMonthly": the mean monthly rainfall of each month over the whole period. @@ -42,4 +50,3 @@ Month(number 1 to 12): month rainfall of each year is plotted, e.g. march rainfa Since "winter" is a crossing year, 12, 1, 2, 12 is in former year, and 1, 2 are in latter year. so winter belongs to the latter year. } - diff --git a/man/getMoment.Rd b/man/getMoment.Rd index 28ded94..4f962b1 100644 --- a/man/getMoment.Rd +++ b/man/getMoment.Rd @@ -1,4 +1,4 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/analyzeTS.R \name{getMoment} \alias{getMoment} @@ -19,15 +19,15 @@ get moment analysis of the input distribution dis <- seq(1, 100) getMoment(dis) -# More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ + } \references{ \itemize{ \item Lukasz Komsta and Frederick Novomestky (2015). moments: Moments, cumulants, skewness, kurtosis and -related tests. R package version 0.14. http://CRAN.R-project.org/package=moments +related tests. R package version 0.14. https://CRAN.R-project.org/package=moments \item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for -Statistical Computing, Vienna, Austria. URL http://www.R-project.org/. +Statistical Computing, Vienna, Austria. URL https://www.R-project.org/. 
} } - diff --git a/man/getNcdfVar.Rd b/man/getNcdfVar.Rd index 725ba25..916f95e 100644 --- a/man/getNcdfVar.Rd +++ b/man/getNcdfVar.Rd @@ -1,4 +1,4 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ncdf.R \name{getNcdfVar} \alias{getNcdfVar} @@ -20,16 +20,16 @@ the target variable. # First open the test NETcDF file. filePath <- system.file("extdata", "tnc.nc", package = "hyfo") -# Then if you don't know the variable name, you can use \\code{getNcdfVar} to get variable name +# Then if you don't know the variable name, you can use \code{getNcdfVar} to get variable name varname <- getNcdfVar(filePath) -# More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ + } \references{ \itemize{ \item David Pierce (2015). ncdf4: Interface to Unidata netCDF (Version 4 or Earlier) Format Data Files. R package version 1.14.1. -http://CRAN.R-project.org/package=ncdf4 +https://CRAN.R-project.org/package=ncdf4 } } - diff --git a/man/getPreciBar.Rd b/man/getPreciBar.Rd index cce6f9b..d712fb4 100644 --- a/man/getPreciBar.Rd +++ b/man/getPreciBar.Rd @@ -1,22 +1,49 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/getPreciBar(generic).R -\docType{methods} \name{getPreciBar} \alias{getPreciBar} -\alias{getPreciBar,data.frame-method} \alias{getPreciBar,list-method} +\alias{getPreciBar,data.frame-method} \title{get mean rainfall bar plot of the input dataset or time series.} \usage{ -getPreciBar(data, method, cell = "mean", output = "data", name = NULL, - plotRange = TRUE, member = NULL, omitNA = TRUE, info = FALSE, ...) - -\S4method{getPreciBar}{list}(data, method, cell = "mean", output = "data", - name = NULL, plotRange = TRUE, member = NULL, omitNA = TRUE, - info = FALSE, ...) 
- -\S4method{getPreciBar}{data.frame}(data, method, cell = "mean", - output = "data", name = NULL, plotRange = TRUE, member = NULL, - omitNA = TRUE, info = FALSE, ...) +getPreciBar( + data, + method, + cell = "mean", + output = "data", + name = NULL, + plotRange = TRUE, + member = NULL, + omitNA = TRUE, + info = FALSE, + ... +) + +\S4method{getPreciBar}{list}( + data, + method, + cell = "mean", + output = "data", + name = NULL, + plotRange = TRUE, + member = NULL, + omitNA = TRUE, + info = FALSE, + ... +) + +\S4method{getPreciBar}{data.frame}( + data, + method, + cell = "mean", + output = "data", + name = NULL, + plotRange = TRUE, + member = NULL, + omitNA = TRUE, + info = FALSE, + ... +) } \arguments{ \item{data}{A list containing different information, should be the result of reading netcdf file using @@ -29,8 +56,8 @@ please refer to the details.} \item{cell}{A vector containing the locaton of the cell, e.g. c(2, 3), default is "mean", representing the spatially averaged value. Check details for more information.} -\item{output}{A string showing the type of the output, if \code{output = 'ggplot'}, the returned -data can be used in ggplot and \code{getPreciBar_comb()}; if \code{output = 'plot'}, the returned data is the plot containing all +\item{output}{A string showing the type of the output, if \code{output = 'ggplot'}, the returned +data can be used in ggplot and \code{getPreciBar_comb()}; if \code{output = 'plot'}, the returned data is the plot containing all layers' information, and can be plot directly or used in grid.arrange; if not set, the data will be returned.} @@ -56,8 +83,8 @@ The calculated mean value of the input time series and the plot of the result. get mean rainfall bar plot of the input dataset or time series. } \details{ -There are following methods to be selected, -"annual": annual rainfall of each year is plotted. +There are following methods to be selected, +"annual": annual rainfall of each year is plotted. 
"winter", "spring", "autumn", "summer": seasonal rainfall of each year is plotted. Month(number 1 to 12): month rainfall of each year is plotted, e.g. march rainfall of each year. "meanMonthly": the mean monthly rainfall of each month over the whole period. @@ -71,17 +98,11 @@ IT IS NOT THE LONGITUDE AND LATITUDE. e.g., \code{cell = c(2, 3)}, the program w and 3rd latitude, by the increasing order. Longitude comes first. -It is a generic function, if in your case you need to debug, please see \code{?debug()} +It is a generic function, if in your case you need to debug, please see \code{?debug()} for how to debug S4 method. } -\section{Methods (by class)}{ -\itemize{ -\item \code{list}: - -\item \code{data.frame}: -}} \examples{ -#gridData provided by package is the result of \\code{loadNcdf()} +#gridData provided by package is the result of \code{loadNcdf()} data(tgridData) b1 <- getPreciBar(tgridData, method = 'annual') b2 <- getPreciBar(tgridData, method = 'meanMonthly') @@ -93,7 +114,8 @@ a <- getPreciBar(TS, method = 'spring') a <- getPreciBar(TS, method = 'spring', info = TRUE) -# More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ + } \references{ \itemize{ @@ -101,7 +123,6 @@ a <- getPreciBar(TS, method = 'spring', info = TRUE) 21(12), 1-20. URL http://www.jstatsoft.org/v21/i12/. \item H. Wickham. ggplot2: elegant graphics for data analysis. Springer New York, 2009. \item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for -Statistical Computing, Vienna, Austria. URL http://www.R-project.org/. +Statistical Computing, Vienna, Austria. URL https://www.R-project.org/. 
} } - diff --git a/man/getPreciBar_comb.Rd b/man/getPreciBar_comb.Rd index 6c65499..6f95087 100644 --- a/man/getPreciBar_comb.Rd +++ b/man/getPreciBar_comb.Rd @@ -1,11 +1,18 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/getPreciBar(generic).R \name{getPreciBar_comb} \alias{getPreciBar_comb} \title{Combine bars together} \usage{ -getPreciBar_comb(..., list = NULL, nrow = 1, x = "", y = "", - title = "", output = FALSE) +getPreciBar_comb( + ..., + list = NULL, + nrow = 1, + x = "", + y = "", + title = "", + output = FALSE +) } \arguments{ \item{...}{different barplots generated by \code{getPreciBar(, output = 'ggplot')}, refer to details.} @@ -30,27 +37,28 @@ A combined barplot. Combine bars together } \details{ -..., representing different ouput generated by \code{getPreciBar(, output = 'ggplot')}, they -have to be of the same type, e.g., -1. Jan precipitation of different years, Feb precipitation of different years, and... +..., representing different ouput generated by \code{getPreciBar(, output = 'ggplot')}, they +have to be of the same type, e.g., +1. Jan precipitation of different years, Feb precipitation of different years, and... They are both monthly precipitation, and they share x axis. 2. Mean monthly precipitation of different dataset. e.g., long term mean monthly precipitation and short term mean monthly precipitation. They are both mean monthly precipitation. } \examples{ -data(tgridData)# the result of \\code{\\link{loadNcdf}} + +data(tgridData)# the result of \code{\link{loadNcdf}} #output type of getPreciBar() has to be 'ggplot'. 
b1 <- getPreciBar(tgridData, method = 2, output = 'ggplot', name = 'b1') b2 <- getPreciBar(tgridData, method = 3, output = 'ggplot', name = 'b2') getPreciBar_comb(b1, b2) -# More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ + } \references{ \itemize{ \item H. Wickham. ggplot2: elegant graphics for data analysis. Springer New York, 2009. } } - diff --git a/man/getSpatialMap.Rd b/man/getSpatialMap.Rd index 4a3d047..94bd55d 100644 --- a/man/getSpatialMap.Rd +++ b/man/getSpatialMap.Rd @@ -1,4 +1,4 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/getSpatialMap.R \name{getSpatialMap} \alias{getSpatialMap} @@ -17,7 +17,7 @@ details.} is NULL, if no member assigned, and there is a "member" in dimensions, the mean value of the members will be taken.} -\item{...}{several arguments including x, y, title, catchment, point, output, name, info, scale, color, +\item{...}{several arguments including x, y, title, catchment, point, output, name, info, scale, color, type in \code{?getSpatialMap_mat} for details.} } \value{ @@ -27,15 +27,17 @@ A matrix representing the raster map is returned, and the map is plotted. Get spatial map of the input dataset. } \details{ -There are following methods to be selected, -"meanAnnual": annual rainfall of each year is plotted. +There are following methods to be selected, +"meanAnnual": annual rainfall of each year is plotted. "winter", "spring", "autumn", "summer": MEAN seasonal rainfall of each year is plotted. Month(number 1 to 12): MEAN month rainfall of each year is plotted, e.g. MEAN march rainfall of each year. "mean", "max", "min": mean daily, maximum daily, minimum daily precipitation. 
} \examples{ + + \dontrun{ -#gridData provided in the package is the result of \\code {loadNcdf} +#gridData provided in the package is the result of \code {loadNcdf} data(tgridData) getSpatialMap(tgridData, method = 'meanAnnual') getSpatialMap(tgridData, method = 'winter') @@ -50,5 +52,5 @@ getSpatialMap(tgridData, method = 'winter', catchment = testCat, point = point) # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ -} +} diff --git a/man/getSpatialMap_comb.Rd b/man/getSpatialMap_comb.Rd index 7d8b009..50d8c93 100644 --- a/man/getSpatialMap_comb.Rd +++ b/man/getSpatialMap_comb.Rd @@ -1,11 +1,18 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/getSpatialMap.R \name{getSpatialMap_comb} \alias{getSpatialMap_comb} \title{Combine maps together} \usage{ -getSpatialMap_comb(..., list = NULL, nrow = 1, x = "", y = "", - title = "", output = FALSE) +getSpatialMap_comb( + ..., + list = NULL, + nrow = 1, + x = "", + y = "", + title = "", + output = FALSE +) } \arguments{ \item{...}{different maps generated by \code{getSpatialMap(, output = 'ggplot')}, see details.} @@ -29,14 +36,16 @@ A combined map. Combine maps together } \details{ -For \code{getSpatialMap_comb}, the maps to be compared should be with same size and resolution, +For \code{getSpatialMap_comb}, the maps to be compared should be with same size and resolution, in other words, they should be fully overlapped by each other. If they have different resolutions, use \code{interpGridData{ecomsUDG.Raccess}} to interpolate. 
} \examples{ + + \dontrun{ -data(tgridData)# the result of \\code{\\link{loadNcdf}} +data(tgridData)# the result of \code{\link{loadNcdf}} #The output should be 'ggplot' a1 <- getSpatialMap(tgridData, method = 'summer', output = 'ggplot', name = 'a1') a2 <- getSpatialMap(tgridData, method = 'winter', output = 'ggplot', name = 'a2') @@ -50,11 +59,11 @@ getSpatialMap_comb(list = list(a1, a2), nrow = 2) -# More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ + } \references{ \itemize{ \item H. Wickham. ggplot2: elegant graphics for data analysis. Springer New York, 2009. } } - diff --git a/man/getSpatialMap_mat.Rd b/man/getSpatialMap_mat.Rd index 08d34d3..f786dd1 100644 --- a/man/getSpatialMap_mat.Rd +++ b/man/getSpatialMap_mat.Rd @@ -1,12 +1,21 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/getSpatialMap.R \name{getSpatialMap_mat} \alias{getSpatialMap_mat} \title{Replot raster matrix} \usage{ -getSpatialMap_mat(matrix, title_d = NULL, catchment = NULL, point = NULL, - output = "data", name = NULL, info = FALSE, scale = "identity", - color = NULL, ...) +getSpatialMap_mat( + matrix, + title_d = NULL, + catchment = NULL, + point = NULL, + output = "data", + name = NULL, + info = FALSE, + scale = "identity", + color = NULL, + ... +) } \arguments{ \item{matrix}{A matrix raster, should be the result of \code{getSpatialMap()}, output should be default @@ -16,11 +25,11 @@ or 'data'} \item{catchment}{A catchment file geting from \code{shp2cat()} in the package, if a catchment is available for background.} -\item{point}{A dataframe, showing other information, e.g., location of the gauging stations. The +\item{point}{A dataframe, showing other information, e.g., location of the gauging stations. 
The the data.frame should be with columes "name, lon, lat, z, value".} -\item{output}{A string showing the type of the output, if \code{output = 'ggplot'}, the returned -data can be used in ggplot and \code{getSpatialMap_comb()}; if \code{output = 'plot'}, the returned data is the plot containing all +\item{output}{A string showing the type of the output, if \code{output = 'ggplot'}, the returned +data can be used in ggplot and \code{getSpatialMap_comb()}; if \code{output = 'plot'}, the returned data is the plot containing all layers' information, and can be plot directly or used in grid.arrange; if not set, the raster matrix data will be returned.} @@ -31,7 +40,7 @@ different outputs in the later multiplot using \code{getSpatialMap_comb}.} \item{scale}{A string showing the plot scale, 'identity' or 'sqrt'.} -\item{color}{Most of time you don't have to set this, but if you are not satisfied with the +\item{color}{Most of time you don't have to set this, but if you are not satisfied with the default color, you can set your own palette here. e.g., \code{color = c('red', 'blue')}, then the value from lowest to highest, will have the color from red to blue. More info about color, please check ?palette().} @@ -47,8 +56,9 @@ replot the matrix output from \code{getSpatialMap}, when \code{output = 'data'} value. } \examples{ + \dontrun{ -data(tgridData)# the result of \\code{loadNcdf} +data(tgridData)# the result of \code{loadNcdf} #the output type of has to be default or 'data'. a1 <- getSpatialMap(tgridData, method = 'mean') a2 <- getSpatialMap(tgridData, method = 'max') @@ -66,12 +76,13 @@ getSpatialMap_mat(a6) } -# More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ + } \references{ \itemize{ \item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for -Statistical Computing, Vienna, Austria. 
URL http://www.R-project.org/. +Statistical Computing, Vienna, Austria. URL https://www.R-project.org/. \item Hadley Wickham (2007). Reshaping Data with the reshape Package. Journal of Statistical Software, 21(12), 1-20. URL http://www.jstatsoft.org/v21/i12/. @@ -81,14 +92,12 @@ Software, 40(1), 1-29. URL http://www.jstatsoft.org/v40/i01/. \item Original S code by Richard A. Becker and Allan R. Wilks. R version by Ray Brownrigg. Enhancements by Thomas P Minka (2015). maps: Draw Geographical Maps. R package version -2.3-11. http://CRAN.R-project.org/package=maps +2.3-11. https://CRAN.R-project.org/package=maps -\item Roger Bivand and Nicholas Lewin-Koh (2015). maptools: Tools for Reading and Handling Spatial -Objects. R package version 0.8-36. http://CRAN.R-project.org/package=maptools +\item Pebesma, Edzer, and Roger Bivand. 2023a. Sp: Classes and Methods for Spatial Data. https://CRAN.R-project.org/package=sp. \item Roger Bivand and Colin Rundel (2015). rgeos: Interface to Geometry Engine - Open Source (GEOS). R -package version 0.3-11. http://CRAN.R-project.org/package=rgeos +package version 0.3-11. https://CRAN.R-project.org/package=sf } } - diff --git a/man/list2Dataframe.Rd b/man/list2Dataframe.Rd index b43b7bf..6bdc641 100644 --- a/man/list2Dataframe.Rd +++ b/man/list2Dataframe.Rd @@ -1,4 +1,4 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/list2dataframe.R \name{list2Dataframe} \alias{list2Dataframe} @@ -15,7 +15,7 @@ The converted dataframe \description{ Convert a list of different time series to a dataframe. Usually the list is the output of \code{extractPeriod} -NOTE: Since it's dataframe, so the dataframes in the input datalist should have the same +NOTE: Since it's dataframe, so the dataframes in the input datalist should have the same date, if not, please use \code{extractPeriod} to process. 
} \examples{ @@ -26,6 +26,6 @@ datalist_new <- extractPeriod(datalist, commonPeriod = TRUE) dataframe <- list2Dataframe(datalist_new) -# More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ -} +# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ +} diff --git a/man/loadNcdf.Rd b/man/loadNcdf.Rd index fe82428..6b2fff0 100644 --- a/man/loadNcdf.Rd +++ b/man/loadNcdf.Rd @@ -1,4 +1,4 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ncdf.R \name{loadNcdf} \alias{loadNcdf} @@ -12,15 +12,15 @@ loadNcdf(filePath, varname, tz = "GMT", ...) \item{varname}{A character representing the variable name, you can use \code{getNcdfVar} to get the basic information about the variables and select the target.} -\item{tz}{A string representing the time zone, default is GMT, if you know what time zone is +\item{tz}{A string representing the time zone, default is GMT, if you know what time zone is you can assign it in the argument. If \code{tz = ''}, current time zone will be taken.} -\item{...}{Several arguments including Year, month, lon, lat -type in \code{?downscaleNcdf} for details.You can load while downscale, +\item{...}{Several arguments including Year, month, lon, lat +type in \code{?downscaleNcdf} for details.You can load while downscale, and also first load than use \code{downscaleNcdf} to downscale.} } \value{ -A list object from \code{hyfo} containing the information to be used in the analysis, +A list object from \code{hyfo} containing the information to be used in the analysis, or biascorrection. } \description{ @@ -30,26 +30,26 @@ Load NetCDF file # First open the test NETcDF file. 
filePath <- system.file("extdata", "tnc.nc", package = "hyfo") -# Then if you don't know the variable name, you can use \\code{getNcdfVar} to get variable name +# Then if you don't know the variable name, you can use \code{getNcdfVar} to get variable name varname <- getNcdfVar(filePath) nc <- loadNcdf(filePath, varname) # you can directly add your downscale information to the argument. nc1 <- loadNcdf(filePath, varname, year = 2006, lon = c(-2, -0.5), lat = c(43.2, 43.7)) -nc2 <- loadNcdf(filePath, varname, year = 2005, month = 3:8, lon = c(-2, -0.5), +nc2 <- loadNcdf(filePath, varname, year = 2005, month = 3:8, lon = c(-2, -0.5), lat = c(43.2, 43.7)) # More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ + } \references{ \itemize{ \item David Pierce (2015). ncdf4: Interface to Unidata netCDF (Version 4 or Earlier) Format Data Files. R package version 1.14.1. -http://CRAN.R-project.org/package=ncdf4 +https://CRAN.R-project.org/package=ncdf4 \item Santander MetGroup (2015). ecomsUDG.Raccess: R interface to the ECOMS User Data Gateway. R package version 2.2-6. http://meteo.unican.es/ecoms-udg } } - diff --git a/man/monthlyPreci.Rd b/man/monthlyPreci.Rd index 38cbcfa..0da6004 100644 --- a/man/monthlyPreci.Rd +++ b/man/monthlyPreci.Rd @@ -1,4 +1,4 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fillGap.R \name{monthlyPreci} \alias{monthlyPreci} @@ -19,4 +19,3 @@ the monthly rainfall matrix of the rainfall time series. 
\description{ Get monthly rainfall } - diff --git a/man/plotTS.Rd b/man/plotTS.Rd index 636988b..17705e0 100644 --- a/man/plotTS.Rd +++ b/man/plotTS.Rd @@ -1,18 +1,28 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/analyzeTS.R \name{plotTS} \alias{plotTS} \title{plot time series, with marks on missing value.} \usage{ -plotTS(..., type = "line", output = "data", plot = "norm", name = NULL, - x = NULL, y = NULL, title = NULL, list = NULL) +plotTS( + ..., + type = "line", + output = "data", + plot = "norm", + name = NULL, + showNA = TRUE, + x = NULL, + y = NULL, + title = NULL, + list = NULL +) } \arguments{ \item{...}{input time series.} \item{type}{A string representing the type of the time series, e.g. 'line' or 'bar'.} -\item{output}{A string showing which type of output you want. Default is "data", if "ggplot", the +\item{output}{A string showing which type of output you want. Default is "data", if "ggplot", the data that can be directly plotted by ggplot2 will be returned, which is easier for you to make series plots afterwards.} @@ -22,6 +32,8 @@ plot, and "cum" gives a cumulative plot. Default is "norm".} \item{name}{If \code{output = 'ggplot'}, name has to be assigned to your output, in order to differentiate different outputs in the later multiplot using \code{plotTS_comb}.} +\item{showNA}{A boolean representing whether the NA values should be marked, default is TRUE.} + \item{x}{label for x axis.} \item{y}{label for y axis.} @@ -37,7 +49,7 @@ A plot of the input time series. plot time series, with marks on missing value. } \details{ -If your input has more than one time series, the program will only plot the common period of +If your input has more than one time series, the program will only plot the common period of different time series. } \examples{ @@ -63,11 +75,11 @@ plotTS(dataframe, dataframe1, plot = 'cum') # and compare them using plotTS_comb. 
If all data are in one plot, there might be too messy. -# More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ + } \references{ \itemize{ \item H. Wickham. ggplot2: elegant graphics for data analysis. Springer New York, 2009. } } - diff --git a/man/plotTS_comb.Rd b/man/plotTS_comb.Rd index 857dc0d..88908e7 100644 --- a/man/plotTS_comb.Rd +++ b/man/plotTS_comb.Rd @@ -1,11 +1,19 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/analyzeTS.R \name{plotTS_comb} \alias{plotTS_comb} \title{Combine time seires plot together} \usage{ -plotTS_comb(..., nrow = 1, type = "line", list = NULL, x = "Date", - y = "", title = "", output = FALSE) +plotTS_comb( + ..., + nrow = 1, + type = "line", + list = NULL, + x = "Date", + y = "", + title = "", + output = FALSE +) } \arguments{ \item{...}{different time series plots generated by \code{plotTS(, output = 'ggplot')}, refer to details.} @@ -32,7 +40,7 @@ A combined time series plot. Combine time seires plot together } \details{ -..., representing different ouput file generated by \code{plotTS(, output = 'ggplot'), name = yourname}, +..., representing different ouput file generated by \code{plotTS(, output = 'ggplot'), name = yourname}, different names must be assigned when generating different output. e.g. @@ -46,11 +54,11 @@ a2 <- plotTS(testdl[[2]], output = 'ggplot', name = 2) plotTS_comb(a1, a2) plotTS_comb(list = list(a1, a2), y = 'y axis', nrow = 2) -# More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ + } \references{ \itemize{ \item H. Wickham. ggplot2: elegant graphics for data analysis. Springer New York, 2009. 
} } - diff --git a/man/resample.Rd b/man/resample.Rd index df9e891..059f501 100644 --- a/man/resample.Rd +++ b/man/resample.Rd @@ -1,6 +1,5 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/resample(generic).R -\docType{methods} \name{resample} \alias{resample} \alias{resample,data.frame-method} @@ -28,20 +27,14 @@ Resameple your time series or ncdf files, more info pleae see details. } \details{ Note, when you want to change daily data to monthly data, a new date column will be generated, -usually the date column will be the middle date of each month, 15th, or 16th. However, if your -time series doesn't start from the beginning of a month or ends to the end of a month, e.g. -from 1999-3-14 to 2008-2-2, the first and last generated date could be wrong. Not only the date, but also the data, because you are -not calculating based on a intact month. +usually the date column will be the middle date of each month, 15th, or 16th. However, if your +time series doesn't start from the beginning of a month or ends to the end of a month, e.g. +from 1999-3-14 to 2008-2-2, the first and last generated date could be wrong. Not only the date, but also the data, because you are +not calculating based on a intact month. -It is a generic function, if in your case you need to debug, please see \code{?debug()} +It is a generic function, if in your case you need to debug, please see \code{?debug()} for how to debug S4 method. } -\section{Methods (by class)}{ -\itemize{ -\item \code{data.frame}: - -\item \code{list}: -}} \examples{ # Daily to monthly data(testdl) @@ -50,24 +43,24 @@ str(TS) TS_new <- resample(TS, method = 'day2mon') # Monthly to daily -TS <- data.frame(Date = seq(as.Date('1999-9-15'), length = 30, by = '1 month'), +TS <- data.frame(Date = seq(as.Date('1999-9-15'), length = 30, by = '1 month'), runif(30, 3, 10)) TS_new <- resample(TS, method = 'mon2day') #' # First load ncdf file. 
filePath <- system.file("extdata", "tnc.nc", package = "hyfo") -varname <- getNcdfVar(filePath) +varname <- getNcdfVar(filePath) nc <- loadNcdf(filePath, varname) nc_new <- resample(nc, 'day2mon') -# More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ + } \references{ \itemize{ \item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for -Statistical Computing, Vienna, Austria. URL http://www.R-project.org/. +Statistical Computing, Vienna, Austria. URL https://www.R-project.org/. } } - diff --git a/man/shp2cat.Rd b/man/shp2cat.Rd index 8fc9890..5c9e044 100644 --- a/man/shp2cat.Rd +++ b/man/shp2cat.Rd @@ -1,4 +1,4 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/shp2cat.R \name{shp2cat} \alias{shp2cat} @@ -16,7 +16,7 @@ A catchment object can be used in \code{getSpatialMap()}. Get a catchment object from selected shape file. } \details{ -This function is based on the package \code{rgdal} and \code{sp}, and the output comes from the package +This function is based on the package \code{sf} and \code{sp}, and the output comes from the package \code{sp} } \examples{ @@ -24,15 +24,15 @@ This function is based on the package \code{rgdal} and \code{sp}, and the output file <- system.file("extdata", "testCat.shp", package = "hyfo") catchment <- shp2cat(file) -# More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ +# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ + } \references{ \itemize{ \item Roger Bivand, Tim Keitt and Barry Rowlingson (2015). rgdal: Bindings for the Geospatial Data -Abstraction Library. R package version 1.0-4. http://CRAN.R-project.org/package=rgdal +Abstraction Library. R package version 1.0-4. 
https://CRAN.R-project.org/package=sf \item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for -Statistical Computing, Vienna, Austria. URL http://www.R-project.org/. +Statistical Computing, Vienna, Austria. URL https://www.R-project.org/. } } - diff --git a/man/testCat.Rd b/man/testCat.Rd index 4c7cfbe..a4a463c 100644 --- a/man/testCat.Rd +++ b/man/testCat.Rd @@ -1,14 +1,16 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dataDocument.R \docType{data} \name{testCat} \alias{testCat} \title{testCat} -\format{A catchment file generated by library rgdal. +\format{ +A catchment file generated by library sf. \describe{ \item{class}{Formal class 'SpatialPolygonsDataFrame' [package "sp"] with 5 slots} ... -}} +} +} \usage{ testCat } @@ -16,4 +18,3 @@ testCat testCat } \keyword{datasets} - diff --git a/man/testdl.Rd b/man/testdl.Rd index b0f1a94..20bd2e9 100644 --- a/man/testdl.Rd +++ b/man/testdl.Rd @@ -1,16 +1,18 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dataDocument.R \docType{data} \name{testdl} \alias{testdl} \title{testdl} -\format{A list consists of 3 different lists. +\format{ +A list consists of 3 different lists. \describe{ \item{AAA}{AAA, a dataframe containing a date column and a value column. } \item{BBB}{BBB, a dataframe containing a date column and a value column.} \item{CCC}{CCC, a dataframe containing a date column and a value column.} ... -}} +} +} \source{ http://meteo.navarra.es/estaciones/mapadeestaciones.cfm http://www4.gipuzkoa.net/oohh/web/esp/02.asp @@ -28,4 +30,3 @@ A list containing different precipitation time series. 
} } \keyword{datasets} - diff --git a/man/tgridData.Rd b/man/tgridData.Rd index 6fcf41c..ee61870 100644 --- a/man/tgridData.Rd +++ b/man/tgridData.Rd @@ -1,17 +1,19 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dataDocument.R \docType{data} \name{tgridData} \alias{tgridData} \title{tgridData} -\format{A list containing different information. +\format{ +A list containing different information. \describe{ \item{Variables}{variable information. } \item{Data}{Data.} \item{xyCoords}{longitude and latitude of the data.} \item{Dates}{Date information.} ... -}} +} +} \source{ http://www.meteo.unican.es/datasets/spain02 } @@ -23,11 +25,10 @@ A list containing different information getting from grid data file, e.g., netcd } \references{ \itemize{ -\item Herrera, S., Ancell, R., Gutierrez, J. M., Pons, M. R., Frias, M. D., & Fernandez, J. -(2012). Development and analysis of a 50-year high-resolution daily gridded precipitation dataset -over Spain (Spain02). International Journal of Climatology +\item Herrera, S., Ancell, R., Gutierrez, J. M., Pons, M. R., Frias, M. D., & Fernandez, J. +(2012). Development and analysis of a 50-year high-resolution daily gridded precipitation dataset +over Spain (Spain02). International Journal of Climatology (http://www.meteo.unican.es/datasets/spain02), 10.1002/joc.2256. 
} } \keyword{datasets} - diff --git a/man/writeNcdf.Rd b/man/writeNcdf.Rd index f669f25..4f7d404 100644 --- a/man/writeNcdf.Rd +++ b/man/writeNcdf.Rd @@ -1,11 +1,17 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ncdf.R \name{writeNcdf} \alias{writeNcdf} \title{Write to NetCDF file using hyfo list file} \usage{ -writeNcdf(gridData, filePath, missingValue = 1e+20, tz = "GMT", - units = NULL, version = 3) +writeNcdf( + gridData, + filePath, + missingValue = 1e+20, + tz = "GMT", + units = NULL, + version = 3 +) } \arguments{ \item{gridData}{A hyfo list file from \code{\link{loadNcdf}}} @@ -14,13 +20,13 @@ writeNcdf(gridData, filePath, missingValue = 1e+20, tz = "GMT", \item{missingValue}{A number representing the missing value in the NetCDF file, default is 1e20 -#'} - -\item{tz}{A string representing the time zone, default is GMT, if you know what time zone is +#' @param tz A string representing the time zone, default is GMT, if you know what time zone is you can assign it in the argument. If \code{tz = ''}, current time zone will be taken.} -\item{units}{A string showing in which unit you are putting in the NetCDF file, it can be -seconds or days and so on. If not specified, the function will pick up the possible largest +\item{tz}{time zone, default is "GMT"} + +\item{units}{A string showing in which unit you are putting in the NetCDF file, it can be +seconds or days and so on. 
If not specified, the function will pick up the possible largest time units from \code{c('weeks', 'days', 'hours', 'mins', 'secs')}} \item{version}{ncdf file versions, default is 3, if 4 is chosen, output file will be foreced to version 4.} @@ -36,26 +42,29 @@ Write to NetCDF file using hyfo list file filePath <- system.file("extdata", "tnc.nc", package = "hyfo") -# Then if you don't know the variable name, you can use \\code{getNcdfVar} to get variable name +# Then if you don't know the variable name, you can use \code{getNcdfVar} to get variable name varname <- getNcdfVar(filePath) nc <- loadNcdf(filePath, varname) # Then write to your work directory +\dontrun{ writeNcdf(nc, 'test.nc') +} + + +# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/ -# More examples can be found in the user manual on http://yuanchao-xu.github.io/hyfo/ } \references{ \itemize{ \item David Pierce (2015). ncdf4: Interface to Unidata netCDF (Version 4 or Earlier) Format Data Files. R package version 1.14.1. -http://CRAN.R-project.org/package=ncdf4 +https://CRAN.R-project.org/package=ncdf4 \item Santander MetGroup (2015). ecomsUDG.Raccess: R interface to the ECOMS User Data Gateway. R package version 2.2-6. 
http://meteo.unican.es/ecoms-udg } } - diff --git a/vignettes/hyfo.Rmd b/vignettes/hyfo.Rmd index 44ee087..1e298af 100644 --- a/vignettes/hyfo.Rmd +++ b/vignettes/hyfo.Rmd @@ -1,6 +1,6 @@ --- -title: '[hyfo Easy Start](http://yuanchao-xu.github.io/hyfo/)' -author: '[Yuanchao Xu](https://dk.linkedin.com/in/xuyuanchao37)' +title: '[hyfo Easy Start](https://yuanchao-xu.github.io/hyfo/)' +author: '[Yuanchao Xu](https://www.linkedin.com/in/xuyuanchao37/)' date: '`r Sys.Date()`' output: pdf_document: @@ -16,7 +16,7 @@ vignette: > # Introduction -**Official Website is [http://yuanchao-xu.github.io/hyfo](http://yuanchao-xu.github.io/hyfo), where manuals and more details can be found.** +**Official Website is [https://yuanchao-xu.github.io/hyfo](http://yuanchao-xu.github.io/hyfo/), where manuals and more details can be found.** hyfo is an R package, initially designed for the European Project EUPORIAS, and cooperated with DHI Denmark, which was then extended to other uses in hydrology, hydraulics and climate. @@ -24,9 +24,8 @@ This package mainly focuses on data process and visulization in hydrology and cl **If you feel hyfo is of a little help, please cite it as following:** -Xu, Yuanchao(2015). hyfo: Hydrology and Climate Forecasting R Package for Data Analysis and Visualization. Retrieved from http://yuanchao-xu.github.io/hyfo/ +Xu, Yuanchao(2015). hyfo: Hydrology and Climate Forecasting R Package for Data Analysis and Visualization. Retrieved from https://yuanchao-xu.github.io/hyfo/ -[Author in this corner](https://dk.linkedin.com/in/xuyuanchao37) #### TIPS * For the hydrology tools part, the minimum time unit is a day, i.e., it mainly focuses on water resource and some long term analysis. For flood analysis part, it will be added in future.