This article presents a couple of examples of general workflows in R alone (without using the app), running through the various steps using the functions directly.
This gives you a bit more flexibility in how you explore and/or filter your data.
Let’s work through a couple of examples (note these examples are presented for illustration, but the shape files are not included in the package).
Clinton Creek
Load a shape file defining the region of interest
creek_sf <- st_read("misc/data/Clinton_Creek.shp")
Fetch Lidar DEM (this may take a while the first time)
creek_lidar <- dem_region(creek_sf)
#> Get Lidar data
#> Saving new tiles to cache directory: ~/.local/share/bcaquiferdata
#> Checking for matching tifs
#> Fetching bc_092p002_xli1m_utm10_2019.tif - skipping (new_only = TRUE)
#> Fetching bc_092p013_xli1m_utm10_2019.tif - skipping (new_only = TRUE)
#> Fetching bc_092p012_xli1m_utm10_2019.tif - skipping (new_only = TRUE)
#> Fetching bc_092i092_xli1m_utm10_2019.tif - skipping (new_only = TRUE)
#> Fetching bc_092p003_xli1m_utm10_2019.tif - skipping (new_only = TRUE)
#> Cropping DEM to region
Plot to double check
plot(creek_lidar)
#> downsample set to 44
Collect wells in this region with added elevation from Lidar
creek_wells <- creek_sf |>
wells_subset() |> # Subset to region
wells_elev(creek_lidar) # Add Lidar
#> Subset wells
#> Fixing wells with a bottom lithology interval of zero thickness: 37685
#> Add elevation
Plot again to double check
Export data for Strater, Voxler, and ArcHydro
wells_export(creek_wells, id = "clinton", type = "strater")
#> Writing Strater files ./clinton_strater_lith.csv, ./clinton_strater_collars.csv, ./clinton_strater_wls.csv
#> [1] "./clinton_strater_lith.csv" "./clinton_strater_collars.csv" "./clinton_strater_wls.csv"
wells_export(creek_wells, id = "clinton", type = "voxler")
#> Writing Voxler file ./clinton_voxler.csv
#> [1] "./clinton_voxler.csv"
wells_export(creek_wells, id = "clinton", type = "archydro")
#> Writing ArcHydro files ./clinton_archydro_well.csv, ./clinton_archydro_hguid.csv, ./clinton_archydro_bh.csv
#> [1] "./clinton_archydro_well.csv" "./clinton_archydro_hguid.csv" "./clinton_archydro_bh.csv"
Mill Bay Watershed
Load a shape file defining the region of interest
mill_sf <- st_read("misc/data/MillBayWatershed.shp")
We’ll check against some tiles
g <- ggplot() +
annotation_map_tile(type = "osm", zoomin = -1) +
geom_sf(data = mill_sf, fill = NA, linewidth = 1.5) +
labs(caption = "Data from OpenStreet Map")
g
Fetch Lidar DEM (this may take a while the first time)
mill_lidar <- dem_region(mill_sf)
#> Get Lidar data
#> Saving new tiles to cache directory: ~/.local/share/bcaquiferdata
#> Checking for matching tifs
#> Fetching bc_092b063_xl1m_utm10_2019.tif - skipping (new_only = TRUE)
#> Fetching bc_092b062_xl1m_utm10_2019.tif - skipping (new_only = TRUE)
#> Cropping DEM to region
Add to our plot to double check
mill_lidar_sf <- stars::st_downsample(mill_lidar, n = 12) |> # Downsample first
st_as_sf(as_points = FALSE, merge = TRUE) # Convert to polygons
#> for stars_proxy objects, downsampling only happens for dimensions x and y
Looks like we don’t have elevation data for the whole region. This can be confirmed by checking the online LidarBC map
Let’s take a look at our options using TRIM data.
mill_trim <- dem_region(mill_sf, type = "trim")
#> Get TRIM data
#> checking your existing tiles for mapsheet 92b are up to date
#> Cropping DEM to region
Add to our plot to double check
mill_trim_sf <- mill_trim |>
st_as_sf(as_points = FALSE, merge = TRUE) # Convert to polygons
g + geom_sf(data = mill_trim_sf, aes(fill = elev), colour = NA)
#> Zoom: 13
TRIM is at a coarser resolution, but covers our entire area. Let’s use it instead.
Collect wells in this region with added elevation from TRIM.
mill_wells <- mill_sf |>
wells_subset() |>
wells_elev(mill_trim)
#> Subset wells
#> Fixing wells with a bottom lithology interval of zero thickness: 8966, 29709, 36777, 37733, 48853, 48941, 55042, 56015, 56016, 68632, 69137, 69139, 69141, 75028, 75042, 84493, 84495, 84496, 84498, 84499, 84503, 84536
#> Fixing wells missing depth: 80040, 80041, 80043, 81555, 88357, 94353, 94356
#> Add elevation
Plot again to double check, and see that we now have elevation data for all wells.
g +
geom_sf(data = mill_wells, size = 1, aes(colour = elev)) +
scale_color_viridis_c(na.value = "red")
#> Zoom: 13
Export data for Strater, Voxler, and ArcHydro
wells_export(mill_wells, id = "mill", type = "strater")
#> Writing Strater files ./mill_strater_lith.csv, ./mill_strater_collars.csv, ./mill_strater_wls.csv
#> [1] "./mill_strater_lith.csv" "./mill_strater_collars.csv" "./mill_strater_wls.csv"
wells_export(mill_wells, id = "mill", type = "voxler")
#> Writing Voxler file ./mill_voxler.csv
#> [1] "./mill_voxler.csv"
wells_export(mill_wells, id = "mill", type = "archydro")
#> Writing ArcHydro files ./mill_archydro_well.csv, ./mill_archydro_hguid.csv, ./mill_archydro_bh.csv
#> [1] "./mill_archydro_well.csv" "./mill_archydro_hguid.csv" "./mill_archydro_bh.csv"
Extra tools
Load cleaned data (will fetch it if it doesn’t already exist)
wells_lith <- data_read("lithology")
Explore the lithology standardization performed by bcaquiferdata
lith_std <- wells_lith |>
select(well_tag_number, contains("lith")) |>
arrange(!is.na(lithology_category))
lith_std
#> # A tibble: 624,560 × 23
#> well_tag_number lithology_from_ft_bgl lithology_to_ft_bgl lithology_raw_data lithology_descriptio…¹
#> <dbl> <dbl> <dbl> <chr> <chr>
#> 1 11 164 187 "red ash" ""
#> 2 13 1 120 "\"" ""
#> 3 49 0 15 "" ""
#> 4 62 0 0 "backfilled to 217 foot… ""
#> 5 73 25 170 "delimite w/copper ore" ""
#> 6 73 190 380 "copper ore w/delimite" ""
#> 7 88 0 65 "" ""
#> 8 98 0 15 "" ""
#> 9 105 0 15 "" ""
#> 10 163 200 210 "gray,clean a little co… ""
#> # ℹ 624,550 more rows
#> # ℹ abbreviated name: ¹lithology_description_code
#> # ℹ 18 more variables: lithology_material_code <chr>, lithology_hardness_code <chr>,
#> # lithology_colour_code <chr>, lithology_observation <chr>, lithology_from_m <dbl>, lithology_to_m <dbl>,
#> # lithology_raw_combined <chr>, lith_n <int>, lith_rec <int>, flag_lith_nodepths <lgl>,
#> # flag_lith_overruns <lgl>, flag_lith_intervals <lgl>, lithology_clean <chr>, lith_primary <chr>,
#> # lith_secondary <chr>, lith_tertiary <chr>, lithology_extra <chr>, lithology_category <chr>
Save it to peruse later
write_csv(lith_std, "lith_categorization.csv")