Storage

Caching, hashing, and customization

William Michael Landau

2017-11-05

1 Storage basics

When you run make(), drake stores your imports and output targets in a hidden cache, a storage location that lives in the .drake/ folder by default.

library(drake)
load_basic_example()
config <- make(my_plan, verbose = FALSE)

You can explore your cached data using functions like loadd(), readd(), and cached().

head(cached())
## cache C:/Users/c240390/AppData/Local/Temp/RtmpSQkVhU/Rbuild2db86da6234/drake/...
## [1] "'report.Rmd'"           "'report.md'"           
## [3] "coef_regression1_large" "coef_regression1_small"
## [5] "coef_regression2_large" "coef_regression2_small"
readd(small)
## cache C:/Users/c240390/AppData/Local/Temp/RtmpSQkVhU/Rbuild2db86da6234/drake/...
##            x y
## 1 -0.7227425 2
## 2  0.7576700 2
## 3  1.0918126 0
## 4 -0.5503073 3
## 5  1.5503624 0
loadd(large)
## cache C:/Users/c240390/AppData/Local/Temp/RtmpSQkVhU/Rbuild2db86da6234/drake/...
head(large)
##             x y
## 1 -0.82715861 2
## 2 -0.92468697 2
## 3  0.55105843 0
## 4  1.53942875 2
## 5 -1.99225519 0
## 6 -0.01957744 1
rm(large) # Does not remove `large` from the cache.
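
Because the cache keeps its own copy, you can restore `large` to your environment at any time:

loadd(large) # `large` comes back from the cache, even after rm().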

2 Caches as R objects

The storr package does the heavy lifting. A storr is an object in R that serves as an abstraction for a storage backend, usually a file system. See the main storr vignette for a thorough walkthrough.

class(config$cache) # from `config <- make(...)`
## [1] "storr" "R6"
cache <- get_cache() # Get the default cache from the last build.
## cache C:/Users/c240390/AppData/Local/Temp/RtmpSQkVhU/Rbuild2db86da6234/drake/...
class(cache)
## [1] "storr" "R6"
cache$list() # functionality from storr
##  [1] "'report.Rmd'"           "'report.md'"           
##  [3] "coef_regression1_large" "coef_regression1_small"
##  [5] "coef_regression2_large" "coef_regression2_small"
##  [7] "coefficients"           "data.frame"            
##  [9] "knit"                   "large"                 
## [11] "lm"                     "reg1"                  
## [13] "reg2"                   "regression1_large"     
## [15] "regression1_small"      "regression2_large"     
## [17] "regression2_small"      "rpois"                 
## [19] "simulate"               "small"                 
## [21] "stats::rnorm"           "summ_regression1_large"
## [23] "summ_regression1_small" "summ_regression2_large"
## [25] "summ_regression2_small" "summary"               
## [27] "suppressWarnings"
cache$get("small") # functionality from storr
## $type
## [1] "object"
## 
## $value
##            x y
## 1 -0.7227425 2
## 2  0.7576700 2
## 3  1.0918126 0
## 4 -0.5503073 3
## 5  1.5503624 0
## 
## $imported
## [1] FALSE
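
Other generic storr methods work too. For example, a quick existence check (plain storr functionality, nothing drake-specific):

cache$exists("small") # functionality from storr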

3 Hash algorithms

The key to storr’s internals is the concept of hashing. Storr uses hashes to label the objects it stores, and drake leverages these hashes to figure out which targets are up to date and which need to be (re)built. A hash is like a fingerprint for a piece of data: if the data changes, the hash should change too. Regardless of the data’s size, the hash always has the same number of characters.

library(digest) # package for hashing objects and files
smaller_data <- 12
larger_data <- rnorm(1000)
digest(smaller_data) # compute the hash
## [1] "23c80a31c0713176016e6e18d76a5f31"
digest(larger_data)
## [1] "3abda4b8612e38cc76a52632ca1addb5"

However, different hash algorithms vary in output length.

digest(larger_data, algo = "sha512")
## [1] "f300c04f1b81002c00a54984a19b27f20ef7a9849cc099997241e787df951276270d0e5ad77db2094e54c7db82f7814e41c24682aacc8b9dfa6092a0dbd1aebf"
digest(larger_data, algo = "md5")
## [1] "3abda4b8612e38cc76a52632ca1addb5"
digest(larger_data, algo = "xxhash64")
## [1] "f6aed19423ad328f"
digest(larger_data, algo = "murmur32")
## [1] "c823b2ad"

4 Which hash algorithm should you choose?

Hashing is expensive, and unsurprisingly, shorter hashes are usually faster to compute. So why not always use murmur32? One reason is the risk of collisions, which occur when two different objects have the same hash. In general, shorter hashes carry a higher risk of collisions, and we want our fingerprints to be unique. On the other hand, a longer hash is not always the answer. Besides speed, the decision depends on how the output is used. Drake and storr both use hash keys as names for internal cache files, and file names should respect the 260-character cap on Windows file paths. That is why drake uses a shorter hash algorithm for internal cache-related file names and a longer hash algorithm for everything else.

default_short_hash_algo() # drake's default for internal cache file names
## [1] "xxhash64"
default_long_hash_algo() # drake's default for everything else
## [1] "sha256"
short_hash(cache)
## [1] "xxhash64"
long_hash(cache)
## [1] "sha256"
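
To get a rough sense of the speed difference, you can time repeated hashes of a large object. This is an informal, machine-dependent sketch, not a rigorous benchmark, so expect the exact timings to vary.

big_object <- rnorm(1e6) # a throwaway object, just for illustration
system.time(for (i in seq_len(10)) digest(big_object, algo = "sha512"))
system.time(for (i in seq_len(10)) digest(big_object, algo = "murmur32"))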

5 Select the hash algorithms of the default cache

For new projects, use new_cache() to set the hash algorithms of the default cache.

cache_path(cache) # default cache from before
## [1] "C:\\Users\\c240390\\AppData\\Local\\Temp\\RtmpSQkVhU\\Rbuild2db86da6234\\drake\\vignettes\\.drake"
clean(destroy = TRUE) # start from scratch to reset both hash algorithms
## cache C:/Users/c240390/AppData/Local/Temp/RtmpSQkVhU/Rbuild2db86da6234/drake/...
tmp <- new_cache(
  path = default_cache_path(), # the `.drake/` folder
  short_hash_algo = "crc32",
  long_hash_algo = "sha1"
)

The cache at default_cache_path() (equivalently, the .drake/ folder) is the default cache used for make().

config <- make(my_plan, verbose = FALSE)
short_hash(config$cache) # would have been xxhash64 (default_short_hash_algo())
## [1] "crc32"
long_hash(config$cache) # would have been sha256 (default_long_hash_algo())
## [1] "sha1"

You can change the long hash algorithm without throwing away the cache, but the project will rebuild from scratch. As for the short hash, you are committed until you delete the cache and its supporting files.

outdated(my_plan, verbose = FALSE) # empty
## character(0)
config$cache <- configure_cache(
  config$cache,
  long_hash_algo = "murmur32",
  overwrite_hash_algos = TRUE
)

Below, the targets become outdated because the existing hash keys do not match the new hash algorithm.

outdated(my_plan, verbose = FALSE)
##  [1] "'report.md'"            "coef_regression1_large"
##  [3] "coef_regression1_small" "coef_regression2_large"
##  [5] "coef_regression2_small" "large"                 
##  [7] "regression1_large"      "regression1_small"     
##  [9] "regression2_large"      "regression2_small"     
## [11] "small"                  "summ_regression1_large"
## [13] "summ_regression1_small" "summ_regression2_large"
## [15] "summ_regression2_small"
config <- make(my_plan, verbose = FALSE)
short_hash(config$cache) # same as before
## [1] "crc32"
long_hash(config$cache) # different from before
## [1] "murmur32"

6 More on custom caches

You do not need to use the default cache, whose files live at default_cache_path() (the .drake/ folder). However, if you store your cache somewhere else, such as the custom faster_cache/ folder below, you will need to manually supply the cache to every function that requires one.

faster_cache <- new_cache(
  path = "faster_cache",
  short_hash_algo = "murmur32",
  long_hash_algo = "murmur32"
)
cache_path(faster_cache)
## [1] "C:\\Users\\c240390\\AppData\\Local\\Temp\\RtmpSQkVhU\\Rbuild2db86da6234\\drake\\vignettes\\faster_cache"
cache_path(cache) # location of the previous cache
## [1] "C:\\Users\\c240390\\AppData\\Local\\Temp\\RtmpSQkVhU\\Rbuild2db86da6234\\drake\\vignettes\\.drake"
short_hash(faster_cache)
## [1] "murmur32"
long_hash(faster_cache)
## [1] "murmur32"
new_plan <- workplan(
  simple = 1 + 1
)
make(new_plan, cache = faster_cache)
## connect 33 imports: rules, report, new_data, lines, envir, additions, smaller...
## connect 1 target: simple
## check 1 item: simple
## target simple
cached(cache = faster_cache)
## [1] "simple"
readd(simple, cache = faster_cache)
## [1] 2

7 Recovering the cache

You can recover an old cache from the file system. You could use storr::storr_rds() directly if you know the short hash algorithm, but this_cache() and recover_cache() are safer for drake.

old_cache <- this_cache("faster_cache") # Get a cache you know exists...
recovered <- recover_cache("faster_cache") # or create a new one if missing.
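
For comparison, the manual storr route might look like the sketch below. It assumes the faster_cache/ folder from the previous section still exists, and you may need to match the cache's short hash algorithm and key mangling settings yourself, which is why the drake functions above are safer.

library(storr)
manual <- storr_rds("faster_cache", mangle_key = TRUE) # settings must mirror how the cache was created
manual$list() # should show the same keys as cached(cache = faster_cache)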

8 More on storr caches

If you want to bypass drake and generate a cache directly from storr, it is best to do so right from the beginning.

library(storr)
my_storr <- storr_rds("my_storr", mangle_key = TRUE)
make(new_plan, cache = my_storr)
## Unloading targets from environment:
##   simple
## connect 34 imports: rules, report, new_data, lines, envir, additions, smaller...
## connect 1 target: simple
## check 1 item: simple
## target simple
cached(cache = my_storr)
## [1] "simple"
readd(simple, cache = my_storr)
## [1] 2

Drake supports storr_rds() caches. Other cache types may work too, but they should have a storr-like API and support namespaces.

9 In-memory caches

Some caches store your data in memory rather than in files on disk. Drake can make use of these in-memory caches, but not with any kind of parallel computing: when you call make(), the parallelism argument cannot be "Makefile", and jobs must be 1 (the default). Also, keep in mind that unless you save your workspace, your in-memory cache will disappear when you close your R session.

memory_cache <- storr_environment()
other_plan <- workplan(
  some_data = rnorm(50),
  more_data = rpois(75, lambda = 10),
  result = mean(c(some_data, more_data))
)
make(other_plan, cache = memory_cache)
## connect 37 imports: rules, report, new_data, simple, lines, envir, additions,...
## connect 3 targets: some_data, more_data, result
## check 4 items: rnorm, rpois, c, mean
## import rnorm
## import rpois
## import c
## import mean
## check 2 items: some_data, more_data
## target some_data
## target more_data
## check 1 item: result
## target result
cached(cache = memory_cache)
## [1] "c"         "mean"      "more_data" "result"    "rnorm"     "rpois"    
## [7] "some_data"
readd(result, cache = memory_cache)
## [1] 6.315174

10 Cache types

Drake has functions to help you create caches with known supported types.

default_cache_type()
## [1] "storr_rds"
cache_types()
## [1] "storr_rds"         "storr_environment"
in_memory_cache_types()
## [1] "storr_environment"
env <- new.env()
my_type <- new_cache(type = "storr_environment")
my_type_2 <- new_cache(type = "storr_environment", envir = env)
ls(env)
## [1] "data"           "hash_algorithm" "keys"

For new in-memory caches, please use new_cache() rather than get_cache() or recover_cache(), since those two functions look for caches on the file system.

11 Cleaning up

If you want to start from scratch, you can clean() the cache. Use the destroy argument to remove it completely. cache$del() and cache$destroy() are also options, but they leave output file targets dangling. By contrast, clean(destroy = TRUE) also removes the file targets generated by drake::make().

clean(small, large)
## cache C:/Users/c240390/AppData/Local/Temp/RtmpSQkVhU/Rbuild2db86da6234/drake/...
cached() # 'small' and 'large' are gone
## cache C:/Users/c240390/AppData/Local/Temp/RtmpSQkVhU/Rbuild2db86da6234/drake/...
##  [1] "'report.Rmd'"           "'report.md'"           
##  [3] "coef_regression1_large" "coef_regression1_small"
##  [5] "coef_regression2_large" "coef_regression2_small"
##  [7] "coefficients"           "data.frame"            
##  [9] "knit"                   "lm"                    
## [11] "reg1"                   "reg2"                  
## [13] "regression1_large"      "regression1_small"     
## [15] "regression2_large"      "regression2_small"     
## [17] "rpois"                  "simulate"              
## [19] "stats::rnorm"           "summ_regression1_large"
## [21] "summ_regression1_small" "summ_regression2_large"
## [23] "summ_regression2_small" "summary"               
## [25] "suppressWarnings"
clean(destroy = TRUE)
## cache C:/Users/c240390/AppData/Local/Temp/RtmpSQkVhU/Rbuild2db86da6234/drake/...
clean(destroy = TRUE, cache = faster_cache)
clean(destroy = TRUE, cache = my_storr)