Connecting to the database
Download/update the data and save to your cache.
renmods_update(type = "all")

Connect to the ENMODS database table
db <- renmods_connect()
#> ℹ Connecting to "this_yr", "yr_2_5", "yr_5_10", and "historic" data
#> ✔ this_yr: Last downloaded 2026-04-14 11:26:37 (within 1 week(s))
#> ✔ yr_2_5: Last downloaded 2026-04-14 11:27:35 (within 26 week(s))
#> ✔ yr_5_10: Last downloaded 2026-04-14 11:28:33 (within 26 week(s))
#> ✔ historic: Last downloaded 2026-04-14 11:32:12 (within 26 week(s))

Optionally, prefilter by data type or date range.
db <- renmods_connect(type = "this_yr")
#> ℹ Connecting to "this_yr" data
#> ✔ this_yr: Last downloaded 2026-04-14 11:26:37 (within 1 week(s))
db <- renmods_connect(dates = c("2024-12-15", "2025-01-15"))
#> ℹ Connecting to "this_yr" and "yr_2_5" data for dates between 2024-12-15 and 2025-01-15
#> ✔ this_yr: Last downloaded 2026-04-14 11:26:37 (within 1 week(s))
#> ✔ yr_2_5: Last downloaded 2026-04-14 11:27:35 (within 26 week(s))
db <- renmods_connect(dates = c("2026-01-01", "2026-01-31"))
#> ℹ Connecting to "this_yr" data for dates between 2026-01-01 and 2026-01-31
#> ✔ this_yr: Last downloaded 2026-04-14 11:26:37 (within 1 week(s))

Collecting data into memory
Collect the data into a data frame (from a database connection) for working with or saving. The smaller the data set and the fewer the data types, the faster this will go.
df <- collect(db)

Or use the dplyr package to do more filtering before you collect the data.
First we’ll check out what’s in the data by exploring the first couple of data points.
glimpse(db)
#> Rows: ??
#> Columns: 70
#> Database: DuckDB 1.4.4 [steffi@Linux 6.17.0-20-generic:R 4.5.2/:memory:]
#> $ Ministry_Contact <chr> "Breanne Hill", "Breanne Hill", "Yihting Lim", "Breanne Hill",…
#> $ Sampling_Agency <chr> "74 - Permittee", "74 - Permittee", "74 - Permittee", "74 - Pe…
#> $ Project <chr> NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA…
#> $ Project_Name <chr> NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA…
#> $ Work_Order_number <chr> "VA26A0226", "VA26A1580", NA, "VA26A0232", "VA26A0622", "VA26A…
#> $ Location_ID <chr> "E303052", "E331284", "E206805", "E298312", "E336167", "E33688…
#> $ Location_Name <chr> "MINE WATER TREATMENT PLANT INFLUENT PRETIUM", "BLACKWATER MIN…
#> $ Location_Type <chr> "In-Plant", "River, Stream, or Creek", "River, Stream, or Cree…
#> $ Location_Latitude <dbl> 56.46830, 53.27730, 49.57360, 56.46960, 49.36102, 53.08952, 56…
#> $ Location_Longitude <dbl> -130.1857, -124.8541, -125.5814, -130.1878, -120.4950, -121.59…
#> $ Location_Elevation <dbl> NA, NA, NA, 1400, NA, NA, NA, NA, NA, NA, 382, NA, NA, NA, NA,…
#> $ Location_Elevation_Units <chr> NA, NA, NA, "metre", NA, NA, NA, NA, NA, NA, "metre", NA, NA, …
#> $ Location_Group <chr> NA, NA, "6858", "107835", "261", NA, "8044", "17756", "17679",…
#> $ Field_Visit_Start_Time <dttm> 2026-01-05 15:35:00, 2026-01-17 15:20:00, 2026-01-28 01:00:00…
#> $ Field_Visit_End_Time <dttm> NA, NA, NA, NA, NA, 2026-01-08 15:40:00, NA, NA, NA, NA, 2026…
#> $ Field_Visit_Participants <chr> "B G G W", "BWG", NA, "B G G W", "AN MM", "AH JTD …
#> $ Field_Comment <chr> NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA…
#> $ Field_Filtered <lgl> FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,…
#> $ Field_Filtered_Comment <chr> NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA…
#> $ Field_Preservative <chr> NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA…
#> $ Field_Device_ID <chr> NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA…
#> $ Field_Device_Type <chr> NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA…
#> $ Sampling_Context_Tag <chr> NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA…
#> $ Collection_Method <chr> "Time Composite: Segmented Discrete", "Grab", "Grab", "Grab", …
#> $ Medium <chr> "Water - Waste", "Water - Fresh", "Water - Fresh", "Water - Fr…
#> $ Taxonomy <chr> NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA…
#> $ Taxonomy_Common_Name <chr> NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA…
#> $ Depth_Upper <dbl> NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA…
#> $ Depth_Lower <dbl> NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA…
#> $ Depth_Unit <chr> NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA…
#> $ Observed_Date_Time <dttm> 2026-01-05 15:35:00, 2026-01-17 15:20:00, 2026-01-28 01:00:00…
#> $ Observed_Date_Time_Start <dttm> 2026-01-05 15:35:00, 2026-01-17 15:20:00, 2026-01-28 01:00:00…
#> $ Observed_Date_Time_End <dttm> NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, N…
#> $ Observed_Property_ID <chr> "Cadmium Total (fl. conc.)", "Vanadium Dissolved (fl. conc.)",…
#> $ Observed_Property_Description <chr> "Metals - Total; EMS code: CD-T", "Metals - Dissolved; EMS cod…
#> $ Observed_Property_Analysis_Type <chr> "CHEMICAL", "CHEMICAL", "CHEMICAL", "CHEMICAL", "CHEMICAL", "C…
#> $ Observed_Property_Result_Type <chr> "NUMERIC", "NUMERIC", "NUMERIC", "NUMERIC", "NUMERIC", "NUMERI…
#> $ Observed_Property_Name <chr> "CD-T", "V--D", "0102", "AS-T", "MG-D", "NI-D", "V--T", "AL-T"…
#> $ CAS_Number <chr> "7440-43-9", "7440-62-2", NA, "7440-38-2", "7439-95-4", "7440-…
#> $ Result_Value <dbl> 2.8900e-03, 5.0000e-04, 2.7600e+01, 3.5200e-03, 6.2700e+01, 8.…
#> $ Method_Detection_Limit <dbl> 1e-05, 5e-04, 1e+00, 1e-04, 5e-03, 5e-04, 5e-04, 3e-03, 5e-04,…
#> $ Method_Reporting_Limit <chr> NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA…
#> $ Result_Unit <chr> "mg/L", "mg/L", "mg/L", "mg/L", "mg/L", "mg/L", "mg/L", "mg/L"…
#> $ Detection_Condition <chr> NA, "NOT_DETECTED", NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA…
#> $ Composite_Stat <chr> NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA…
#> $ Fraction <chr> "TOTAL", "DISSOLVED", "TOTAL", "TOTAL", "DISSOLVED", "DISSOLVE…
#> $ Data_Classification <chr> "LAB", "LAB", "LAB", "LAB", "LAB", "LAB", "LAB", "LAB", "LAB",…
#> $ Analyzing_Agency <chr> "ALS", "ALS", "CAR", "ALS", "ALS", "ALS", "ALS", "ALS", "ALS",…
#> $ Analyzing_Agency_Full_Name <chr> "ALS Global", "ALS Global", "CARO Environmental Services", "AL…
#> $ Analysis_Method <chr> "Aliquot:HNO3/HCL dig:ICPMS", "Filt.:Acid:ICPMS", "Auto Potent…
#> $ Analyzed_Date_Time <dttm> 2026-01-10 01:00:00, 2026-01-26 01:00:00, 2026-02-05 16:48:00…
#> $ Result_Status <chr> "Preliminary", "Preliminary", "Preliminary", "Preliminary", "P…
#> $ Result_Grade <chr> "Ungraded", "Ungraded", "Ungraded", "Ungraded", "Ungraded", "U…
#> $ Activity_Name <chr> "4793547;REGULAR;;Water - Waste;E303052;2026-01-05T14:35:00-08…
#> $ Tissue_Type <chr> NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA…
#> $ Lab_Arrival_Temperature <dbl> NA, NA, 9, NA, NA, NA, NA, NA, NA, NA, 6, 12, NA, 3, NA, 17, 9…
#> $ Specimen_Name <chr> "Metals - Total", "Metals - Dissolved", "Major Ions", "Metals …
#> $ Lab_Quality_Flag <chr> NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA…
#> $ Lab_Arrival_Date_Time <dttm> 2026-01-06 22:15:00, 2026-01-22 12:45:00, 2026-02-02 12:45:00…
#> $ Lab_Prepared_Date_Time <dttm> NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, N…
#> $ Lab_Sample_ID <chr> "003", "004", "26B0079-02", "001", "005", "003", "003", "002",…
#> $ Lab_Dilution_Factor <dbl> NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA…
#> $ Lab_Comment <chr> NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA…
#> $ Lab_Batch_ID <chr> "VA26A0226", "VA26A1580", "B6B2050", "VA26A0232", "VA26A0622",…
#> $ QC_Type <chr> "NORMAL", "NORMAL", "NORMAL", "NORMAL", "NORMAL", "NORMAL", "N…
#> $ QC_Source_Activity_Name <chr> NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA…
#> $ Analysis_Method_ID <chr> "F082", "F032", "X310", "F082", "F032", "F032", "F082", "F082"…
#> $ filename <chr> "/home/steffi/.local/share/R/renmods/this_yr.csv.gz", "/home/s…
#> $ ren_date <date> 2026-01-05, 2026-01-17, 2026-01-28, 2026-01-05, 2026-01-12, 2…
#> $ ren_tz <chr> "-08:00", "-08:00", "-08:00", "-08:00", "-08:00", "-08:00", "-…

Remind yourself of the column names
colnames(db)
#> [1] "Ministry_Contact" "Sampling_Agency"
#> [3] "Project" "Project_Name"
#> [5] "Work_Order_number" "Location_ID"
#> [7] "Location_Name" "Location_Type"
#> [9] "Location_Latitude" "Location_Longitude"
#> [11] "Location_Elevation" "Location_Elevation_Units"
#> [13] "Location_Group" "Field_Visit_Start_Time"
#> [15] "Field_Visit_End_Time" "Field_Visit_Participants"
#> [17] "Field_Comment" "Field_Filtered"
#> [19] "Field_Filtered_Comment" "Field_Preservative"
#> [21] "Field_Device_ID" "Field_Device_Type"
#> [23] "Sampling_Context_Tag" "Collection_Method"
#> [25] "Medium" "Taxonomy"
#> [27] "Taxonomy_Common_Name" "Depth_Upper"
#> [29] "Depth_Lower" "Depth_Unit"
#> [31] "Observed_Date_Time" "Observed_Date_Time_Start"
#> [33] "Observed_Date_Time_End" "Observed_Property_ID"
#> [35] "Observed_Property_Description" "Observed_Property_Analysis_Type"
#> [37] "Observed_Property_Result_Type" "Observed_Property_Name"
#> [39] "CAS_Number" "Result_Value"
#> [41] "Method_Detection_Limit" "Method_Reporting_Limit"
#> [43] "Result_Unit" "Detection_Condition"
#> [45] "Composite_Stat" "Fraction"
#> [47] "Data_Classification" "Analyzing_Agency"
#> [49] "Analyzing_Agency_Full_Name" "Analysis_Method"
#> [51] "Analyzed_Date_Time" "Result_Status"
#> [53] "Result_Grade" "Activity_Name"
#> [55] "Tissue_Type" "Lab_Arrival_Temperature"
#> [57] "Specimen_Name" "Lab_Quality_Flag"
#> [59] "Lab_Arrival_Date_Time" "Lab_Prepared_Date_Time"
#> [61] "Lab_Sample_ID" "Lab_Dilution_Factor"
#> [63] "Lab_Comment" "Lab_Batch_ID"
#> [65] "QC_Type" "QC_Source_Activity_Name"
#> [67] "Analysis_Method_ID" "filename"
#> [69] "ren_date" "ren_tz"Now we can filter by column values and collect the data into R
df <- db |>
filter(Location_ID %in% c("E303052", "E309247")) |>
collect()
df
#> # A tibble: 1,193 × 70
#> Ministry_Contact Sampling_Agency Project Project_Name Work_Order_number Location_ID Location_Name
#> <chr> <chr> <chr> <chr> <chr> <chr> <chr>
#> 1 Breanne Hill 74 - Permittee <NA> <NA> VA26A0226 E303052 MINE WATER TREAT…
#> 2 Winnie Chan 74 - Permittee <NA> <NA> FJ2600203 E309247 CONUMA - BRULE M…
#> 3 Winnie Chan 74 - Permittee <NA> <NA> FJ2600007 E309247 CONUMA - BRULE M…
#> 4 Winnie Chan 74 - Permittee <NA> <NA> FJ2600178 E309247 CONUMA - BRULE M…
#> 5 Winnie Chan 74 - Permittee <NA> <NA> FJ2600095 E309247 CONUMA - BRULE M…
#> 6 Winnie Chan 74 - Permittee <NA> <NA> FJ2600009 E309247 CONUMA - BRULE M…
#> 7 Breanne Hill 74 - Permittee <NA> <NA> VA26A1318 E303052 MINE WATER TREAT…
#> 8 Winnie Chan 74 - Permittee <NA> <NA> FJ2600095 E309247 CONUMA - BRULE M…
#> 9 Winnie Chan 74 - Permittee <NA> <NA> FJ2600040 E309247 CONUMA - BRULE M…
#> 10 Breanne Hill 74 - Permittee <NA> <NA> VA26A1318 E303052 MINE WATER TREAT…
#> # ℹ 1,183 more rows
#> # ℹ 63 more variables: Location_Type <chr>, Location_Latitude <dbl>, Location_Longitude <dbl>,
#> # Location_Elevation <dbl>, Location_Elevation_Units <chr>, Location_Group <chr>,
#> # Field_Visit_Start_Time <dttm>, Field_Visit_End_Time <dttm>, Field_Visit_Participants <chr>,
#> # Field_Comment <chr>, Field_Filtered <lgl>, Field_Filtered_Comment <chr>, Field_Preservative <chr>,
#> # Field_Device_ID <chr>, Field_Device_Type <chr>, Sampling_Context_Tag <chr>,
#> # Collection_Method <chr>, Medium <chr>, Taxonomy <chr>, Taxonomy_Common_Name <chr>, …

You can also pre-select your columns of interest.
df <- db |>
filter(Location_ID %in% c("E303052", "E309247")) |>
select(
"Location_ID",
"Location_Name",
"ren_date",
"ren_tz",
"Observed_Date_Time",
"Observed_Property_Name",
"Result_Value",
"Result_Unit",
"Analysis_Method_ID"
) |>
collect()
df
#> # A tibble: 1,193 × 9
#> Location_ID Location_Name ren_date ren_tz Observed_Date_Time Observed_Property_Name Result_Value
#> <chr> <chr> <date> <chr> <dttm> <chr> <dbl>
#> 1 E303052 MINE WATER TRE… 2026-01-05 -08:00 2026-01-05 15:35:00 CD-T 0.00289
#> 2 E309247 CONUMA - BRULE… 2026-01-28 -08:00 2026-01-28 12:32:00 V--T 0.001
#> 3 E309247 CONUMA - BRULE… 2026-01-02 -08:00 2026-01-02 13:19:00 P--T 0.008
#> 4 E309247 CONUMA - BRULE… 2026-01-25 -08:00 2026-01-25 13:08:00 SR-T 0.322
#> 5 E309247 CONUMA - BRULE… 2026-01-14 -08:00 2026-01-14 12:59:00 0015 26.8
#> 6 E309247 CONUMA - BRULE… 2026-01-04 -08:00 2026-01-04 14:12:00 AS-T 0.00028
#> 7 E303052 MINE WATER TRE… 2026-01-19 -08:00 2026-01-19 15:45:00 1110 27.9
#> 8 E309247 CONUMA - BRULE… 2026-01-14 -08:00 2026-01-14 12:59:00 0039 206
#> 9 E309247 CONUMA - BRULE… 2026-01-07 -08:00 2026-01-07 13:20:00 BI-T 0.0001
#> 10 E303052 MINE WATER TRE… 2026-01-19 -08:00 2026-01-19 15:45:00 CD-D 0.0000214
#> # ℹ 1,183 more rows
#> # ℹ 2 more variables: Result_Unit <chr>, Analysis_Method_ID <chr>

Dates and Times
ENMODS stores date/times as character/text fields with the timezone
indicated by a +/- offset (e.g., “-08:00”). However, in R all values in a
column must use the same timezone. Therefore renmods converts times to
UTC-7:00, called “Etc/GMT+7” (note that the + sign is correct: the Etc/GMT
zone names use the opposite sign to the usual UTC offset notation; see the
Wikipedia page on the tz database).
This may not be always ideal depending on the region you’re working
in, so the original timezone from Observed_Date_Time is
stored in a new field ren_tz for reference. You can convert
to any timezone using the lubridate::with_tz()
function.
We can see that we’re in UTC -7 by looking at the first couple of values.
df$Observed_Date_Time[1:5]
#> [1] "2026-01-05 15:35:00 -07" "2026-01-28 12:32:00 -07" "2026-01-02 13:19:00 -07"
#> [4] "2026-01-25 13:08:00 -07" "2026-01-14 12:59:00 -07"If we want to be in UTC -6:00 we can convert with lubridate’s
with_tz() function.
with_tz(df$Observed_Date_Time[1:5], tz = "Etc/GMT+6")
#> [1] "2026-01-05 16:35:00 -06" "2026-01-28 13:32:00 -06" "2026-01-02 14:19:00 -06"
#> [4] "2026-01-25 14:08:00 -06" "2026-01-14 13:59:00 -06"This only converts the first 5 observations, just for illustration.
If we want to convert the entire data frame to Mountain time
(including DST depending on the time of year), we could use the
America/Edmonton timezone.
df_tz <- mutate(
df,
Observed_Date_Time = with_tz(Observed_Date_Time, "America/Edmonton")
)
# Take a look:
df_tz$Observed_Date_Time[1:5]
#> [1] "2026-01-05 15:35:00 MST" "2026-01-28 12:32:00 MST" "2026-01-02 13:19:00 MST"
#> [4] "2026-01-25 13:08:00 MST" "2026-01-14 12:59:00 MST"If you wanted to convert all time columns at once, you could use the
across() function from dplyr.
First we’ll get the whole data set again
df <- renmods_connect(dates = c("2024-12-15", "2025-01-15")) |>
collect()
#> ℹ Connecting to "this_yr" and "yr_2_5" data for dates between 2024-12-15 and 2025-01-15
#> ✔ this_yr: Last downloaded 2026-04-14 11:26:37 (within 1 week(s))
#> ✔ yr_2_5: Last downloaded 2026-04-14 11:27:35 (within 26 week(s))

What time columns do we have?
select(df, contains("Time"))
#> # A tibble: 127,293 × 8
#> Field_Visit_Start_Time Field_Visit_End_Time Observed_Date_Time Observed_Date_Time_Start
#> <dttm> <dttm> <dttm> <dttm>
#> 1 2025-01-09 15:27:00 NA 2025-01-09 15:27:00 2025-01-09 15:27:00
#> 2 2025-01-02 11:55:00 NA 2025-01-02 11:55:00 2025-01-02 11:55:00
#> 3 2025-01-02 11:50:00 NA 2025-01-02 11:50:00 2025-01-02 11:50:00
#> 4 2025-01-09 15:25:00 NA 2025-01-09 15:25:00 2025-01-09 15:25:00
#> 5 2025-01-07 01:00:00 NA 2025-01-07 01:00:00 2025-01-07 01:00:00
#> 6 2025-01-08 13:00:00 NA 2025-01-08 13:00:00 2025-01-08 13:00:00
#> 7 2025-01-06 01:00:00 NA 2025-01-06 01:00:00 2025-01-06 01:00:00
#> 8 2025-01-06 12:20:00 NA 2025-01-06 12:20:00 2025-01-06 12:20:00
#> 9 2025-01-08 11:16:00 NA 2025-01-08 11:16:00 2025-01-08 11:16:00
#> 10 2025-01-08 14:05:00 2025-01-08 14:30:00 2025-01-08 14:20:00 2025-01-08 14:20:00
#> # ℹ 127,283 more rows
#> # ℹ 4 more variables: Observed_Date_Time_End <dttm>, Analyzed_Date_Time <dttm>,
#> # Lab_Arrival_Date_Time <dttm>, Lab_Prepared_Date_Time <dttm>

We can use the tidyselector contains() with across() to
apply a function across a series of columns.
df_tz <- mutate(
df,
across(contains("Time"), \(col) with_tz(col, "America/Edmonton"))
)
# Take a look:
df_tz$Observed_Date_Time[1:5]
#> [1] "2025-01-09 15:27:00 MST" "2025-01-02 11:55:00 MST" "2025-01-02 11:50:00 MST"
#> [4] "2025-01-09 15:25:00 MST" "2025-01-07 01:00:00 MST"
df_tz$Field_Visit_Start_Time[1:5]
#> [1] "2025-01-09 15:27:00 MST" "2025-01-02 11:55:00 MST" "2025-01-02 11:50:00 MST"
#> [4] "2025-01-09 15:25:00 MST" "2025-01-07 01:00:00 MST"Finally, you can also prevent any date/time conversion and leave those fields as character.
df <- renmods_connect(
dates = c("2024-12-15", "2025-01-15"),
convert_times = FALSE
) |>
collect()
#> ℹ Connecting to "this_yr" and "yr_2_5" data for dates between 2024-12-15 and 2025-01-15
#> ✔ this_yr: Last downloaded 2026-04-14 11:26:37 (within 1 week(s))
#> ✔ yr_2_5: Last downloaded 2026-04-14 11:27:35 (within 26 week(s))
# Note columns are character now
select(df, contains("Time"))
#> # A tibble: 127,293 × 8
#> Field_Visit_Start_Time Field_Visit_End_Time Observed_Date_Time Observed_Date_Time_Start
#> <chr> <chr> <chr> <chr>
#> 1 2025-01-09T14:27-08:00 <NA> 2025-01-09T14:27-08:00 2025-01-09T14:27-08:00
#> 2 2025-01-02T10:55-08:00 <NA> 2025-01-02T10:55-08:00 2025-01-02T10:55-08:00
#> 3 2025-01-02T10:50-08:00 <NA> 2025-01-02T10:50-08:00 2025-01-02T10:50-08:00
#> 4 2025-01-09T14:25-08:00 <NA> 2025-01-09T14:25-08:00 2025-01-09T14:25-08:00
#> 5 2025-01-07T00:00-08:00 <NA> 2025-01-07T00:00-08:00 2025-01-07T00:00-08:00
#> 6 2025-01-08T12:00-08:00 <NA> 2025-01-08T12:00-08:00 2025-01-08T12:00-08:00
#> 7 2025-01-06T00:00-08:00 <NA> 2025-01-06T00:00-08:00 2025-01-06T00:00-08:00
#> 8 2025-01-06T11:20-08:00 <NA> 2025-01-06T11:20-08:00 2025-01-06T11:20-08:00
#> 9 2025-01-08T10:16-08:00 <NA> 2025-01-08T10:16-08:00 2025-01-08T10:16-08:00
#> 10 2025-01-08T13:05-08:00 2025-01-08T13:30-08:00 2025-01-08T13:20-08:00 2025-01-08T13:20-08:00
#> # ℹ 127,283 more rows
#> # ℹ 4 more variables: Observed_Date_Time_End <chr>, Analyzed_Date_Time <chr>,
#> # Lab_Arrival_Date_Time <chr>, Lab_Prepared_Date_Time <chr>