Test File 3: test-fm_fit_model.R

Summary

  • Number of test(s) : 2

  • Number of expectation(s) : 33

  • Number of successful expectation(s) : 10

  • Number of failed expectation(s) : 0

  • Number of errored expectation(s) : 0

  • Number of expectations with warning(s) : 12

  • Number of validated skip expectation(s) : 0

  • Number of skipped expectation(s) : 11
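
The counts above come from a single run of the test file. As a convenience only (not part of the report), the sketch below shows one way to reproduce them from a local checkout of FishMap; it assumes {pkgload} and {testthat} are installed, that the working directory is the package root, and that the test file sits at the conventional tests/testthat/ path.

    # Hypothetical local reproduction of the summary counts (assumed paths/packages).
    pkgload::load_all()
    res <- testthat::test_file("tests/testthat/test-fm_fit_model.R")
    # One row per test_that() block, with passed / failed / warning / skipped tallies
    as.data.frame(res)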

Details

An expectation that passed while also emitting a warning appears twice below: once with a ⚠️ Warning result and once with a Success result.

Test | Description | Expectation | Result | Location | Test_time
(code run outside of test_that()) | | test_that("fm_fit_model works", ...) (full test source reproduced below) | ⚠️ Warning | test-fm_fit_model.R#3 | 2023-04-13 12:23:09

    test_that("fm_fit_model works", {
      # You can test your model with small or big data. You must run the tests locally
      # with the FISHMAP_UPDATE_TEST_OUTPUTS env var set to TRUE to store the rds file.
      test_resolution <- Sys.getenv("FISHMAP_TEST_RESOLUTION", unset = "small")
      if (test_resolution == "small") {
        fm_data_inputs <- readr::read_rds(
          system.file("examples", "part1_output_small.rds", package = "FishMap")
        )
      } else if (test_resolution == "big") {
        ## TODO use correct params for line below
        fm_data_inputs <- readr::read_rds(
          file.path("data", "part1_output_big.rds")
        )
      }

      # run part2
      withr::with_seed(1234, {
        fm_model_results <- fm_fit_model(fm_data_inputs = fm_data_inputs,
                                         SE = 1,
                                         data_source = 1,
                                         data_obs = 2,
                                         samp_process = 0,
                                         b_constraint = 2,
                                         cov_samp_process = 0,
                                         biomass_temporal = 1,
                                         sampling_temporal = 0,
                                         lf_link = 0,
                                         ref_data = "com",
                                         EM = "est_b",
                                         month_ref = 1,
                                         compute_sd = FALSE)
      })

      # Update expected outputs here
      if (Sys.getenv("FISHMAP_UPDATE_TEST_OUTPUTS") == "TRUE") {
        # save output depending on whether we are in flat or in test
        output_inst_dir <- here::here("inst", "examples")
        if (test_resolution == "small") {
          # save all output except sdreport (large file)
          readr::write_rds(x = fm_model_results[names(fm_model_results) != "SD"],
                           file = file.path(output_inst_dir,
                                            paste0("part2_output_", test_resolution, ".rds")))
        } else if (test_resolution == "big") {
          ## TODO what we need to check
          readr::write_rds(x = fm_model_results[names(fm_model_results) != "SD"],
                           file = file.path("data",
                                            paste0("part2_output_", test_resolution, ".rds")))
        }
      }

      # check output is saved as rds
      if (Sys.getenv("FISHMAP_UPDATE_TEST_OUTPUTS") == "TRUE") {
        output_inst_dir <- here::here("inst", "examples")
        if (test_resolution == "small") {
          #' @description Test to check if we can save output small
          expect_true(file.exists(file.path(output_inst_dir,
                                            paste0("part2_output_", test_resolution, ".rds"))))
        } else if (test_resolution == "big") {
          #' @description Test to check if we can save output big
          expect_true(file.exists(file.path("data",
                                            paste0("part2_output_", test_resolution, ".rds"))))
        }
      }

      # Check results of model
      #' @description Testing that the result of fm_fit_model is a list
      expect_type(object = fm_model_results, "list")

      #' @description Testing names of the list returned by fm_fit_model
      expect_named(
        object = fm_model_results,
        expected = c("time.step_df", "loc_x", "report", "samp_process", "converge", "SD")
      )

      #' @description Testing types inside the list returned by fm_fit_model
      expect_s3_class(fm_model_results$time.step_df, "data.frame")
      expect_s3_class(fm_model_results$loc_x, "data.frame")
      expect_type(fm_model_results$report, "list")
      expect_type(fm_model_results$samp_process, "double")
      expect_type(fm_model_results$converge, "integer")
      # expect_s3_class(fm_model_results$SD, "sdreport")

      # Testing for small model (without sd report)
      fm_model_results <- fm_model_results[names(fm_model_results) != "SD"]
      if (test_resolution == "small") {
        expected_outputs <- readr::read_rds(
          system.file("examples",
                      paste0("part2_output_", test_resolution, ".rds"),
                      package = "FishMap")
        )
      } else if (test_resolution == "big") {
        expected_outputs <- readr::read_rds(
          file.path("data", paste0("part2_output_", test_resolution, ".rds"))
        )
      }

      # sort list and data frame elements to avoid order discrepancies
      resort_all <- function(x) {
        x <- x[sort(names(x))]
        result <- lapply(x, function(x) {
          if (inherits(x, c("data.frame", "list"))) {
            x[sort(names(x))]
          } else {
            x
          }
        })
        result
      }
      resorted_result <- resort_all(fm_model_results)
      resorted_expected <- resort_all(expected_outputs)

      # remove name attributes (likely generated by a specific version of {sf})
      attr(resorted_result$loc_x$long, which = "names") <- NULL
      attr(resorted_result$loc_x$lati, which = "names") <- NULL
      attr(resorted_expected$loc_x$long, which = "names") <- NULL
      attr(resorted_expected$loc_x$lati, which = "names") <- NULL

      #' @description Testing that the result of fm_fit_model is stable
      expect_equal(
        object = resorted_result,
        expected = resorted_expected,
        tolerance = 1e-4
      )

      # Testing fm_fit_model() runs for other parameter sets
      #' @description Testing if fm_fit_model works with a second parameter set
      result_fit_model <- try(fm_fit_model(fm_data_inputs = fm_data_inputs,
                                           SE = 1,
                                           data_source = 1,
                                           data_obs = 2,
                                           samp_process = 0,
                                           b_constraint = 2,
                                           cov_samp_process = 0,
                                           biomass_temporal = 0, # changed
                                           sampling_temporal = 1, # changed
                                           lf_link = 1, # changed
                                           ref_data = "sci", # changed
                                           EM = "fix_b", # changed
                                           month_ref = 1,
                                           compute_sd = FALSE),
                              silent = TRUE)
      expect_true(
        !inherits(result_fit_model, "try-error"),
        label = "fm_fit_model generates an error when running with second parameter set"
      )

      #' @description Testing if fm_fit_model works with a third parameter set
      result_fit_model <- try(fm_fit_model(fm_data_inputs = fm_data_inputs,
                                           SE = 1,
                                           data_source = 3, # changed
                                           data_obs = 2,
                                           samp_process = 0,
                                           b_constraint = 2,
                                           cov_samp_process = 0,
                                           biomass_temporal = 1,
                                           sampling_temporal = 0,
                                           lf_link = 0,
                                           ref_data = "com",
                                           EM = "est_b",
                                           month_ref = 1,
                                           compute_sd = FALSE),
                              silent = TRUE)
      expect_true(
        !inherits(result_fit_model, "try-error"),
        label = "fm_fit_model generates an error when running with third parameter set"
      )
    })
(code run outside of test_that()) | NA (was skipped) | # Update expected outputs here | 🔄 Skipped | test-fm_fit_model.R#44 | NA (was skipped)
(code run outside of test_that()) | NA (was skipped) | expect_true(file.exists(file.path(output_inst_dir, paste0("part2_output_", test_resolution, ".rds")))) | 🔄 Skipped | test-fm_fit_model.R#69 | NA (was skipped)
(code run outside of test_that()) | NA (was skipped) | expect_true(file.exists(file.path("data", paste0("part2_output_", test_resolution, ".rds")))) | 🔄 Skipped | test-fm_fit_model.R#72 | NA (was skipped)
fm_fit_model works | Testing that the result of fm_fit_model is a list | expect_type(object = fm_model_results, "list") | ⚠️ Warning | test-fm_fit_model.R#80 | 2023-04-13 12:23:09
fm_fit_model works | Testing that the result of fm_fit_model is a list | expect_type(object = fm_model_results, "list") | Success | test-fm_fit_model.R#80 | 2023-04-13 12:23:09
fm_fit_model works | Testing names of the list returned by fm_fit_model | expect_named(object = fm_model_results, expected = c("time.step_df", "loc_x", "report", "samp_process", "converge", "SD")) | ⚠️ Warning | test-fm_fit_model.R#83 | 2023-04-13 12:23:10
fm_fit_model works | Testing names of the list returned by fm_fit_model | expect_named(object = fm_model_results, expected = c("time.step_df", "loc_x", "report", "samp_process", "converge", "SD")) | Success | test-fm_fit_model.R#83 | 2023-04-13 12:23:10
fm_fit_model works | NA (was skipped) | expected = c( | 🔄 Skipped | test-fm_fit_model.R#85 | NA (was skipped)
fm_fit_model works | Testing types inside the list returned by fm_fit_model | expect_s3_class(fm_model_results$time.step_df, "data.frame") | ⚠️ Warning | test-fm_fit_model.R#96 | 2023-04-13 12:23:10
fm_fit_model works | Testing types inside the list returned by fm_fit_model | expect_s3_class(fm_model_results$time.step_df, "data.frame") | Success | test-fm_fit_model.R#96 | 2023-04-13 12:23:10
fm_fit_model works | Testing types inside the list returned by fm_fit_model | expect_s3_class(fm_model_results$loc_x, "data.frame") | ⚠️ Warning | test-fm_fit_model.R#97 | 2023-04-13 12:23:10
fm_fit_model works | Testing types inside the list returned by fm_fit_model | expect_s3_class(fm_model_results$loc_x, "data.frame") | Success | test-fm_fit_model.R#97 | 2023-04-13 12:23:10
fm_fit_model works | | expect_type(fm_model_results$report, "list") | ⚠️ Warning | test-fm_fit_model.R#98 | 2023-04-13 12:23:10
fm_fit_model works | | expect_type(fm_model_results$report, "list") | Success | test-fm_fit_model.R#98 | 2023-04-13 12:23:10
fm_fit_model works | | expect_type(fm_model_results$samp_process, "double") | ⚠️ Warning | test-fm_fit_model.R#99 | 2023-04-13 12:23:10
fm_fit_model works | | expect_type(fm_model_results$samp_process, "double") | Success | test-fm_fit_model.R#99 | 2023-04-13 12:23:10
fm_fit_model works | | expect_type(fm_model_results$converge, "integer") | ⚠️ Warning | test-fm_fit_model.R#100 | 2023-04-13 12:23:10
fm_fit_model works | | expect_type(fm_model_results$converge, "integer") | Success | test-fm_fit_model.R#100 | 2023-04-13 12:23:11
fm_fit_model works | NA (was skipped) | # expect_s3_class(fm_model_results$SD, "sdreport") | 🔄 Skipped | test-fm_fit_model.R#101 | NA (was skipped)
fm_fit_model works | NA (was skipped) | expected_outputs <- readr::read_rds( | 🔄 Skipped | test-fm_fit_model.R#108 | NA (was skipped)
fm_fit_model works | NA (was skipped) | expected_outputs <- readr::read_rds( | 🔄 Skipped | test-fm_fit_model.R#115 | NA (was skipped)
fm_fit_model works | NA (was skipped) | resorted_expected <- resort_all(expected_outputs) | 🔄 Skipped | test-fm_fit_model.R#137 | NA (was skipped)
fm_fit_model works | NA (was skipped) | attr(resorted_expected$loc_x$long, which = "names") <- NULL | 🔄 Skipped | test-fm_fit_model.R#142 | NA (was skipped)
fm_fit_model works | NA (was skipped) | attr(resorted_expected$loc_x$lati, which = "names") <- NULL | 🔄 Skipped | test-fm_fit_model.R#143 | NA (was skipped)
fm_fit_model works | Testing that the result of fm_fit_model is stable | expect_equal(object = resorted_result, expected = resorted_expected, tolerance = 1e-4) | ⚠️ Warning | test-fm_fit_model.R#146 | 2023-04-13 12:23:11
fm_fit_model works | Testing that the result of fm_fit_model is stable | expect_equal(object = resorted_result, expected = resorted_expected, tolerance = 1e-4) | Success | test-fm_fit_model.R#146 | 2023-04-13 12:23:11
fm_fit_model works | NA (was skipped) | expected = resorted_expected, | 🔄 Skipped | test-fm_fit_model.R#148 | NA (was skipped)
fm_fit_model works | | result_fit_model <- try(fm_fit_model(fm_data_inputs = fm_data_inputs, SE = 1, data_source = 1, data_obs = 2, samp_process = 0, b_constraint = 2, cov_samp_process = 0, biomass_temporal = 0, sampling_temporal = 1, lf_link = 1, ref_data = "sci", EM = "fix_b", month_ref = 1, compute_sd = FALSE), silent = TRUE) # changed from the first call: biomass_temporal, sampling_temporal, lf_link, ref_data, EM | ⚠️ Warning | test-fm_fit_model.R#154 | 2023-04-13 12:23:14
fm_fit_model works | | expect_true(!inherits(result_fit_model, "try-error"), label = "fm_fit_model generates an error when running with second parameter set") | ⚠️ Warning | test-fm_fit_model.R#170 | 2023-04-13 12:23:16
fm_fit_model works | | expect_true(!inherits(result_fit_model, "try-error"), label = "fm_fit_model generates an error when running with second parameter set") | Success | test-fm_fit_model.R#170 | 2023-04-13 12:23:16
fm_fit_model works | | expect_true(!inherits(result_fit_model, "try-error"), label = "fm_fit_model generates an error when running with third parameter set") | ⚠️ Warning | test-fm_fit_model.R#192 | 2023-04-13 12:24:13
fm_fit_model works | | expect_true(!inherits(result_fit_model, "try-error"), label = "fm_fit_model generates an error when running with third parameter set") | Success | test-fm_fit_model.R#192 | 2023-04-13 12:24:13
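
The test source reproduced above reads two environment variables: FISHMAP_TEST_RESOLUTION (defaults to "small"; "big" switches to the data/part1_output_big.rds fixture) and FISHMAP_UPDATE_TEST_OUTPUTS (when set to "TRUE", a fresh part2_output_<resolution>.rds reference file is written). The sketch below is an illustration of a local run, assuming {withr} and {testthat} are installed and the FishMap package root is the working directory; it is not part of the report.

    # Illustrative local run at "small" resolution that also refreshes the stored
    # reference output (inst/examples/part2_output_small.rds), based on the
    # environment variables read in the test above. Adjust values and paths as needed.
    withr::with_envvar(
      c(FISHMAP_TEST_RESOLUTION = "small",      # "big" expects data/part1_output_big.rds instead
        FISHMAP_UPDATE_TEST_OUTPUTS = "TRUE"),  # set to "TRUE" only to regenerate the .rds reference
      testthat::test_file("tests/testthat/test-fm_fit_model.R")
    )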