diff --git a/.lintr b/.lintr
index be4e2eddd..8244162db 100644
--- a/.lintr
+++ b/.lintr
@@ -1,12 +1,19 @@
 linters: linters_with_defaults(
     commented_code_linter = NULL,
-    cyclocomp_linter = cyclocomp_linter(25),
+    cyclocomp_linter = cyclocomp_linter(40),
+    fixed_regex_linter = NULL,
+    function_argument_linter = NULL,
+    indentation_linter = NULL,
     line_length_linter(120),
+    namespace_linter = NULL,
+    nested_ifelse_linter = NULL,
     object_name_linter = NULL,
-    object_length_linter(50),
+    object_length_linter(70),
     object_usage_linter = NULL,
     todo_comment_linter = NULL,
     extraction_operator_linter = NULL,
+    nonportable_path_linter = NULL,
+    string_boundary_linter = NULL,
     undesirable_function_linter = NULL,
     undesirable_operator_linter = NULL,
     defaults = linters_with_tags(tags = NULL)
@@ -14,5 +21,7 @@ linters: linters_with_defaults(
 exclusions: list(
     "inst",
     "man",
-    "tests"
+    "tests",
+    "touchstone",
+    "vignettes"
   )
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index de90831d5..b2ee8bef4 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -20,11 +20,22 @@ repos:
           tests/testmanual/addins/.*invalid.*|
           tests/testmanual/addins/r-valid\.R|
           )$
+    -   id: lintr
+        additional_dependencies:
+        -    r-lib/lintr
+        exclude: >
+          (?x)^(
+          inst/.*|
+          man/.*|
+          tests/.*|
+          touchstone/.*|
+          vignettes/.*|
+          )$
     -   id: roxygenize
         additional_dependencies:
         -    r-lib/pkgapi
         -    dplyr@1.0.9
-        -    roxygen2@7.2.1
+        -    roxygen2@7.2.2
     -   id: use-tidy-description
     -   id: spell-check
         exclude: >
diff --git a/R/communicate.R b/R/communicate.R
index e0a2b314b..5ca445f47 100644
--- a/R/communicate.R
+++ b/R/communicate.R
@@ -23,7 +23,7 @@ communicate_warning <- function(changed, transformers) {
 #' @keywords internal
 communicate_summary <- function(changed, ruler_width) {
   if (!getOption("styler.quiet", FALSE)) {
-    cli::cat_rule(width = max(40, ruler_width))
+    cli::cat_rule(width = max(40L, ruler_width))
     cat("Status\tCount\tLegend \n")
     cli::cat_bullet(
       "\t", sum(!changed, na.rm = TRUE), "\tFile unchanged.",
@@ -36,7 +36,7 @@ communicate_summary <- function(changed, ruler_width) {
     cli::cat_bullet(
       bullet = "cross",
       "\t", sum(is.na(changed)), "\tStyling threw an error."
     )
-    cli::cat_rule(width = max(40, ruler_width))
+    cli::cat_rule(width = max(40L, ruler_width))
   }
 }
diff --git a/R/detect-alignment-utils.R b/R/detect-alignment-utils.R
index aecc1d49d..9be7d0871 100644
--- a/R/detect-alignment-utils.R
+++ b/R/detect-alignment-utils.R
@@ -62,7 +62,7 @@ alignment_drop_comments <- function(pd_by_line) {
 alignment_drop_last_expr <- function(pds_by_line) {
   # TODO could be skipped if we know it's not a function dec
   pd_last_line <- pds_by_line[[length(pds_by_line)]]
-  last_two_lines <- pd_last_line$token[c(nrow(pd_last_line) - 1, nrow(pd_last_line))]
+  last_two_lines <- pd_last_line$token[c(nrow(pd_last_line) - 1L, nrow(pd_last_line))]
   if (identical(last_two_lines, c("')'", "expr"))) {
     pd_last_line <- pd_last_line[-nrow(pd_last_line), ]
   }
@@ -81,7 +81,7 @@ alignment_drop_last_expr <- function(pds_by_line) {
 alignment_ensure_trailing_comma <- function(pd_by_line) {
   last_pd <- last(pd_by_line)
   # needed to make sure comma is added without space
-  last_pd$spaces[nrow(last_pd)] <- 0
+  last_pd$spaces[nrow(last_pd)] <- 0L
   if (last(last_pd$token) == "','") {
     return(pd_by_line)
   } else {
@@ -94,7 +94,7 @@ alignment_ensure_trailing_comma <- function(pd_by_line) {
       stylerignore = last_pd$stylerignore[1L],
      indents = last_pd$indent[1L]
     )
-    tokens$.lag_spaces <- 0
+    tokens$.lag_spaces <- 0L
     tokens$lag_newlines <- tokens$pos_id <- NULL
     pd_by_line[[length(pd_by_line)]] <- rbind(last_pd, tokens)
 
@@ -180,7 +180,7 @@ alignment_has_correct_spacing_around_comma <- function(pd_sub) {
   if (length(comma_tokens) == 0L) {
     return(TRUE)
   }
-  relevant_comma_token <- comma_tokens[seq2(1, length(comma_tokens) - 1L)]
+  relevant_comma_token <- comma_tokens[seq2(1L, length(comma_tokens) - 1L)]
   correct_spaces_before <- pd_sub$.lag_spaces[relevant_comma_token] == 0L
   correct_spaces_after <- pd_sub$spaces[relevant_comma_token] > 0L
   all(correct_spaces_before) && all(correct_spaces_after)
diff --git a/R/detect-alignment.R b/R/detect-alignment.R
index b08ddfeb3..63b417bfe 100644
--- a/R/detect-alignment.R
+++ b/R/detect-alignment.R
@@ -122,7 +122,7 @@ token_is_on_aligned_line <- function(pd_flat) {
     max_previous_col <- max(current_col)
 
     # first col has no leading ,
-    current_col <- nchar(by_line) - as.integer(column > 1)
+    current_col <- nchar(by_line) - as.integer(column > 1L)
     # Problem `by_line` counting from comma before column 3, previous_line
     # counting 1 space before ~
     if (column > 1L) {
@@ -139,7 +139,7 @@ token_is_on_aligned_line <- function(pd_flat) {
     current_col <- "^(,[\\s\\t]*)[^ ]*.*$" %>%
       gsub("\\1", by_line, perl = TRUE) %>%
       nchar() %>%
-      magrittr::subtract(1)
+      magrittr::subtract(1L)
 
     if (column > 1L) {
       # must add previous columns, as first column might not align
diff --git a/R/environments.R b/R/environments.R
index 0f895517c..c6b8987e4 100755
--- a/R/environments.R
+++ b/R/environments.R
@@ -44,10 +44,10 @@ parser_version_get <- function() {
 #' @rdname parser_version_set
 parser_version_find <- function(pd) {
   ifelse(any(pd$token == "equal_assign"),
-    2,
+    2L,
     ifelse(any(pd$token == "expr_or_assign_or_help"),
-      3,
-      1
+      3L,
+      1L
     )
   )
 }
diff --git a/R/indent.R b/R/indent.R
index 8ff7d26b7..7cc21d9f2 100644
--- a/R/indent.R
+++ b/R/indent.R
@@ -21,7 +21,7 @@ indent_without_paren_for_while_fun <- function(pd, indent_by) {
     return(pd)
   }
 
-  if (pd$newlines[length(pd$newlines) - 1] == 0L) {
+  if (pd$newlines[length(pd$newlines) - 1L] == 0L) {
     return(pd)
   }
   pd$indent[nrow] <- indent_by
@@ -60,7 +60,7 @@ indent_without_paren_if_else <- function(pd, indent_by) {
   ] > 0L
 
   if (has_else_without_curly_or_else_chid && needs_indention_now) {
-    pd$indent[seq(else_idx + 1, nrow(pd))] <- indent_by
+    pd$indent[seq(else_idx + 1L, nrow(pd))] <- indent_by
   }
   pd
 }
@@ -108,7 +108,7 @@ compute_indent_indices <- function(pd,
   if (is.na(trigger)) {
     return(numeric(0L))
   }
-  start <- trigger + 1
+  start <- trigger + 1L
   if (is.null(token_closing)) {
     stop <- npd
   } else {
diff --git a/R/io.R b/R/io.R
index 2bc615d17..2598a0b00 100644
--- a/R/io.R
+++ b/R/io.R
@@ -103,7 +103,7 @@ read_utf8_bare <- function(con, warn = TRUE) {
         "The file ", con, " is not encoded in UTF-8. ",
         "These lines contain invalid UTF-8 characters: "
       ),
-      paste(c(utils::head(i), if (n > 6) "..."), collapse = ", ")
+      toString(c(utils::head(i), if (n > 6L) "..."))
     )
   }
   x
diff --git a/R/nest.R b/R/nest.R
index aab93d9f9..8c18f36a5 100644
--- a/R/nest.R
+++ b/R/nest.R
@@ -151,7 +151,7 @@ drop_cached_children <- function(pd) {
 #' @keywords internal
 find_pos_id_to_keep <- function(pd) {
   if (pd$is_cached[1L]) {
-    pd$pos_id[pd$parent <= 0]
+    pd$pos_id[pd$parent <= 0L]
   } else {
     pd$pos_id
   }
@@ -323,7 +323,7 @@ add_attributes_caching <- function(pd_flat, transformers, more_specs) {
 #' @keywords internal
 set_spaces <- function(spaces_after_prefix, force_one) {
   if (force_one) {
-    rep(1, length(spaces_after_prefix))
+    rep(1L, length(spaces_after_prefix))
   } else {
     pmax(spaces_after_prefix, 1L)
   }
diff --git a/R/parse.R b/R/parse.R
index cf3814909..6dd394776 100644
--- a/R/parse.R
+++ b/R/parse.R
@@ -49,7 +49,7 @@ has_crlf_as_first_line_sep <- function(message, initial_text) {
     start_char <- as.numeric(split[3L])
     offending_line <- initial_text[as.integer(split[2L])]
     if (!is.na(offending_line)) {
-      if (substr(offending_line, start_char, start_char + 1) == "\r\n") {
+      if (substr(offending_line, start_char, start_char + 1L) == "\r\n") {
         return(TRUE)
       }
     }
@@ -119,7 +119,7 @@ get_parse_data <- function(text, include_text = TRUE, ...) {
 #' @keywords internal
 add_id_and_short <- function(pd) {
   pd$pos_id <- seq2(1L, nrow(pd))
-  pd$short <- substr(pd$text, 1, 5)
+  pd$short <- substr(pd$text, 1L, 5L)
   pd
 }
diff --git a/R/relevel.R b/R/relevel.R
index 70b8b6a65..8d6a0ebff 100644
--- a/R/relevel.R
+++ b/R/relevel.R
@@ -27,7 +27,7 @@ flatten_operators_one <- function(pd_nested) {
   pd_token_left <- c(special_token, "PIPE", math_token, "'$'")
   pd_token_right <- c(
     special_token, "PIPE", "LEFT_ASSIGN",
-    if (parser_version_get() > 1) "EQ_ASSIGN",
+    if (parser_version_get() > 1L) "EQ_ASSIGN",
     "'+'", "'-'", "'~'"
   )
   pd_nested %>%
@@ -99,7 +99,7 @@ bind_with_child <- function(pd_nested, pos) {
 wrap_expr_in_expr <- function(pd) {
   create_tokens(
     "expr", "",
-    pos_ids = create_pos_ids(pd, 1, after = FALSE),
+    pos_ids = create_pos_ids(pd, 1L, after = FALSE),
     child = pd,
     terminal = FALSE,
     stylerignore = pd$stylerignore[1L],
@@ -145,7 +145,7 @@ wrap_expr_in_expr <- function(pd) {
 #' )
 #' @keywords internal
 relocate_eq_assign <- function(pd) {
-  if (parser_version_get() < 2) {
+  if (parser_version_get() < 2L) {
     post_visit_one(pd, relocate_eq_assign_nest)
   } else {
     pd
@@ -239,7 +239,7 @@ relocate_eq_assign_one <- function(pd) {
 
 #' @keywords internal
 add_line_col_to_wrapped_expr <- function(pd) {
-  if (nrow(pd) > 1) abort("pd must be a wrapped expression that has one row.")
+  if (nrow(pd) > 1L) abort("pd must be a wrapped expression that has one row.")
   pd$line1 <- pd$child[[1L]]$line1[1L]
   pd$line2 <- last(pd$child[[1L]]$line2)
   pd$col1 <- pd$child[[1L]]$col1[1L]
diff --git a/R/roxygen-examples-add-remove.R b/R/roxygen-examples-add-remove.R
index a29e14b3c..3a7ecb40b 100644
--- a/R/roxygen-examples-add-remove.R
+++ b/R/roxygen-examples-add-remove.R
@@ -8,7 +8,7 @@ remove_dont_mask <- function(roxygen) {
     1L, 2L, if (roxygen[3L] == "\n") 3L, last(which(roxygen == "}"))
   ) %>% sort()
   list(
-    code = roxygen[-mask], mask = paste(roxygen[seq2(1, 2)], collapse = "")
+    code = roxygen[-mask], mask = paste(roxygen[seq2(1L, 2L)], collapse = "")
   )
 }
diff --git a/R/rules-line-breaks.R b/R/rules-line-breaks.R
index fec8c359e..01ec147d1 100644
--- a/R/rules-line-breaks.R
+++ b/R/rules-line-breaks.R
@@ -75,7 +75,7 @@ set_line_break_before_curly_opening <- function(pd) {
   linebreak_before_curly <- ifelse(is_function_call(pd),
     # if in function call and has pipe, it is not recognized as function call
     # and goes to else case
-    any(pd$lag_newlines[seq2(1, line_break_to_set_idx[1L])] > 0L),
+    any(pd$lag_newlines[seq2(1L, line_break_to_set_idx[1L])] > 0L),
     # if not a function call, only break line if it is a pipe followed by {}
     pd$token[line_break_to_set_idx] %in% c("SPECIAL-PIPE", "PIPE")
   )
@@ -110,7 +110,7 @@ set_line_break_before_curly_opening <- function(pd) {
   # non-curly expressions after curly expressions must have line breaks
   if (length(should_not_be_on_same_line_idx) > 0L) {
     comma_exprs_idx <- which(pd$token == "','")
-    comma_exprs_idx <- setdiff(comma_exprs_idx, 1 + is_not_curly_curly_idx)
+    comma_exprs_idx <- setdiff(comma_exprs_idx, 1L + is_not_curly_curly_idx)
     non_comment_after_comma <- map_int(comma_exprs_idx,
       next_non_comment,
       pd = pd
@@ -310,7 +310,7 @@ set_line_break_after_opening_if_call_is_multi_line <- function(pd,
     return(pd)
   }
   break_pos <- find_line_break_position_in_multiline_call(pd)
-  idx_nested <- next_non_comment(pd, 2)
+  idx_nested <- next_non_comment(pd, 2L)
   if (pd_is_multi_line(pd$child[[idx_nested]]) && sum(pd$lag_newlines) > 0L) {
     break_pos <- c(break_pos, idx_nested)
   }
@@ -407,7 +407,7 @@ set_line_break_after_ggplot2_plus <- function(pd) {
         which(lead(pd$token == "COMMENT"))
       )
 
-      pd$lag_newlines[plus_without_comment_after + 1] <- 1L
+      pd$lag_newlines[plus_without_comment_after + 1L] <- 1L
     }
   }
 }
diff --git a/R/rules-tokens.R b/R/rules-tokens.R
index 3be21f1d6..69d8b4cde 100644
--- a/R/rules-tokens.R
+++ b/R/rules-tokens.R
@@ -115,7 +115,7 @@ wrap_multiline_curly <- function(pd, indent_by, key_token, space_after = 1L) {
       pd, all_to_be_wrapped_ind, indent_by, space_after
     )
 
-    if (nrow(pd) > 5L) pd$lag_newlines[6] <- 0L
+    if (nrow(pd) > 5L) pd$lag_newlines[6L] <- 0L
   }
   pd
 }
@@ -126,7 +126,7 @@ wrap_multiline_curly <- function(pd, indent_by, key_token, space_after = 1L) {
 #' already wrapped into a such.
 #' @inheritParams wrap_multiline_curly
 #' @keywords internal
-wrap_else_multiline_curly <- function(pd, indent_by = 2, space_after = 0L) {
+wrap_else_multiline_curly <- function(pd, indent_by = 2L, space_after = 0L) {
   if (contains_else_expr(pd) &&
     pd_is_multi_line(pd) &&
     contains_else_expr_that_needs_braces(pd) &&
diff --git a/R/set-assert-args.R b/R/set-assert-args.R
index 1787ba246..cd571d8f8 100644
--- a/R/set-assert-args.R
+++ b/R/set-assert-args.R
@@ -102,7 +102,7 @@ assert_tokens <- function(tokens) {
   invalid_tokens <- tokens[!(tokens %in% lookup_tokens()$token)]
   if (length(invalid_tokens) > 0L) {
     abort(paste(
-      "Token(s)", paste0(invalid_tokens, collapse = ", "), "are invalid.",
+      "Token(s)", toString(invalid_tokens), "are invalid.",
       "You can lookup all valid tokens and their text",
       "with styler:::lookup_tokens(). Make sure you supply the values of",
       "the column 'token', not 'text'."
diff --git a/R/style-guides.R b/R/style-guides.R
index 444fe5318..4ed47a067 100644
--- a/R/style-guides.R
+++ b/R/style-guides.R
@@ -486,7 +486,7 @@ scope_normalize <- function(scope, name = substitute(scope)) {
   if (!all((scope %in% levels))) {
     abort(paste(
       "all values in", name, "must be one of the following:",
-      paste(levels, collapse = ", ")
+      toString(levels)
     ))
   }
 
diff --git a/R/stylerignore.R b/R/stylerignore.R
index 80456562d..2875185e3 100644
--- a/R/stylerignore.R
+++ b/R/stylerignore.R
@@ -13,7 +13,7 @@
 #' @keywords internal
 env_add_stylerignore <- function(pd_flat) {
   if (!env_current$any_stylerignore) {
-    env_current$stylerignore <- pd_flat[0, ]
+    env_current$stylerignore <- pd_flat[0L, ]
     return()
   }
   pd_flat_temp <- pd_flat[pd_flat$terminal | pd_flat$is_cached, ] %>%
diff --git a/R/testing-public-api.R b/R/testing-public-api.R
index 6464cec1b..0cc325414 100644
--- a/R/testing-public-api.R
+++ b/R/testing-public-api.R
@@ -43,7 +43,7 @@ test_dry <- function(path, styler, styled = FALSE) {
   summary <- styler(path, dry = "on")
   checker <- ifelse(styled, testthat::expect_false, testthat::expect_true)
   checker(summary$changed)
-  testthat::expect_true(identical(before, readLines(path)))
+  testthat::expect_identical(before, readLines(path))
 
   if (styled) {
     testthat::expect_error(styler(path, dry = "fail"), NA)
diff --git a/R/testing.R b/R/testing.R
index 96aca3ae8..076849363 100644
--- a/R/testing.R
+++ b/R/testing.R
@@ -235,11 +235,11 @@ copy_to_tempdir <- function(path_perm = testthat_file()) {
 #' @keywords internal
 n_times_faster_with_cache <- function(x1, x2 = x1, ...,
                                       fun = styler::style_text,
-                                      n = 3,
+                                      n = 3L,
                                       clear = "always") {
   rlang::arg_match(clear, c("always", "final", "never", "all but last"))
 
-  out <- purrr::map(1:n, n_times_faster_bench,
+  out <- purrr::map(1L:n, n_times_faster_bench,
     x1 = x1, x2 = x2, fun = fun, ..., n = n, clear = clear
   )
 
@@ -258,7 +258,7 @@ n_times_faster_bench <- function(i, x1, x2, fun, ..., n, clear) {
   first <- system.time(fun(x1, ...))
   if (is.null(x2)) {
-    second <- c(elapsed = 1)
+    second <- c(elapsed = 1L)
   } else {
     second <- system.time(fun(x2, ...))
   }
@@ -368,7 +368,7 @@ test_transformers_drop <- function(transformers) {
       rlang::abort(paste(
         "transformers_drop specifies exclusion rules for transformers that ",
         "are not in the style guilde. Please add the rule to the style guide ",
-        "or remove the dropping rules:", paste(diff, collapse = ", ")
+        "or remove the dropping rules:", toString(diff)
       ))
     }
   })
diff --git a/R/token-create.R b/R/token-create.R
index 279d54e97..35317f7da 100644
--- a/R/token-create.R
+++ b/R/token-create.R
@@ -43,7 +43,7 @@ create_tokens <- function(tokens,
   list(
     token = tokens,
     text = texts,
-    short = substr(texts, 1, 5),
+    short = substr(texts, 1L, 5L),
     lag_newlines = lag_newlines,
     newlines = lead(lag_newlines),
     pos_id = pos_ids,
@@ -85,7 +85,7 @@ create_pos_ids <- function(pd, pos, by = 0.1, after = FALSE, n = 1L) {
   }
   first <- find_start_pos_id(pd, pos, by, direction, after)
   new_ids <- seq(first,
-    to = first + direction * (n - 1) * by, by = by * direction
+    to = first + direction * (n - 1L) * by, by = by * direction
   )
   validate_new_pos_ids(new_ids, after)
   new_ids
diff --git a/R/transform-files.R b/R/transform-files.R
index 11d833c0b..37934ae7c 100644
--- a/R/transform-files.R
+++ b/R/transform-files.R
@@ -19,7 +19,7 @@ transform_files <- function(files,
   transformer <- make_transformer(
     transformers, include_roxygen_examples, base_indention
   )
-  max_char <- min(max(nchar(files), 0), getOption("width"))
+  max_char <- min(max(nchar(files), 0L), getOption("width"))
   len_files <- length(files)
   if (len_files > 0L && !getOption("styler.quiet", FALSE)) {
     cat("Styling ", len_files, " files:\n")
@@ -53,8 +53,8 @@ transform_file <- function(path,
                            message_after_if_changed = " *",
                            ...,
                            dry) {
-  char_after_path <- nchar(message_before) + nchar(path) + 1
-  max_char_after_message_path <- nchar(message_before) + max_char_path + 1
+  char_after_path <- nchar(message_before) + nchar(path) + 1L
+  max_char_after_message_path <- nchar(message_before) + max_char_path + 1L
   n_spaces_before_message_after <- max_char_after_message_path - char_after_path
 
   if (!getOption("styler.quiet", FALSE)) {
diff --git a/R/ui-styling.R b/R/ui-styling.R
index ccbd40f3c..0a840d16d 100644
--- a/R/ui-styling.R
+++ b/R/ui-styling.R
@@ -267,6 +267,7 @@ style_dir <- function(path = ".",
   invisible(changed)
 }
 
+# nolint: start
 #' Prettify R code in current working directory
 #'
 #' This is a helper function for style_dir.
@@ -300,11 +301,16 @@ prettify_any <- function(transformers,
   } else {
     files_other <- NULL
   }
+
   transform_files(
     setdiff(c(files_root, files_other), exclude_files),
-    transformers, include_roxygen_examples, base_indention, dry
+    transformers,
+    include_roxygen_examples,
+    base_indention,
+    dry
   )
 }
+# nolint: end
 
 #' Style files with R source code
 #'
diff --git a/R/unindent.R b/R/unindent.R
index 7aeff5ccb..8596faebb 100644
--- a/R/unindent.R
+++ b/R/unindent.R
@@ -14,9 +14,9 @@ set_unindention_child <- function(pd, token = "')'", unindent_by) {
   }
 
   first_on_last_line <- last(
-    c(1, which(pd$lag_newlines > 0L | pd$multi_line > 0L))
+    c(1L, which(pd$lag_newlines > 0L | pd$multi_line > 0L))
   )
-  on_same_line <- seq2(first_on_last_line, closing - 1)
+  on_same_line <- seq2(first_on_last_line, closing - 1L)
   cand_ind <- setdiff(on_same_line, which(pd$terminal))
 
   if (length(cand_ind) < 1L) {
diff --git a/R/utils-cache.R b/R/utils-cache.R
index dd063f8a6..2465a51a2 100644
--- a/R/utils-cache.R
+++ b/R/utils-cache.R
@@ -170,7 +170,7 @@ cache_by_expression <- function(text,
   # was removed via parse, same as it is in cache_by_expression) and add the
   # base indention.
   expressions[expressions$parent == 0L & expressions$token != "COMMENT" & !expressions$stylerignore, "text"] %>%
-    map(~ cache_write(.x, transformers = transformers, more_specs))
+    map(cache_write, transformers = transformers, more_specs)
 }
 
diff --git a/R/utils-navigate-nest.R b/R/utils-navigate-nest.R
index d1a84ff63..b3fa38212 100644
--- a/R/utils-navigate-nest.R
+++ b/R/utils-navigate-nest.R
@@ -12,7 +12,7 @@
 #' @family third-party style guide helpers
 #' @export
 next_non_comment <- function(pd, pos) {
-  if (length(pos) < 1 || is.na(pos) || pos >= nrow(pd)) {
+  if (length(pos) < 1L || is.na(pos) || pos >= nrow(pd)) {
     return(integer(0L))
   }
   candidates <- seq2(pos + 1L, nrow(pd))
@@ -25,7 +25,7 @@ next_non_comment <- function(pd, pos) {
 #' @export
 #' @rdname next_non_comment
 previous_non_comment <- function(pd, pos) {
-  if (length(pos) < 1 || is.na(pos) || pos > nrow(pd)) {
+  if (length(pos) < 1L || is.na(pos) || pos > nrow(pd)) {
     return(integer(0L))
   }
   candidates <- seq2(1L, pos - 1L)
@@ -67,17 +67,17 @@ next_terminal <- function(pd,
                           stack = FALSE,
                           vars = c("pos_id", "token", "text"),
                           tokens_exclude = NULL) {
-  pd$position <- seq2(1, nrow(pd))
+  pd$position <- seq2(1L, nrow(pd))
   pd <- pd[!(pd$token %in% tokens_exclude), ]
   if (pd$terminal[1L]) {
-    pd[1, c("position", vars)]
+    pd[1L, c("position", vars)]
   } else {
     current <- next_terminal(
       pd$child[[1L]],
       stack = stack, vars = vars, tokens_exclude = tokens_exclude
     )
     if (stack) {
-      bind_rows(pd[1, c("position", vars)], current)
+      bind_rows(pd[1L, c("position", vars)], current)
     } else {
       current
     }
@@ -94,7 +94,7 @@ extend_if_comment <- function(pd, pos) {
   if (pos == nrow(pd)) {
     return(pos)
   }
-  if (pd$token[pos + 1] == "COMMENT") {
+  if (pd$token[pos + 1L] == "COMMENT") {
     extend_if_comment(pd, pos + 1L)
   } else {
     pos
diff --git a/R/zzz.R b/R/zzz.R
index c63b44f3f..cc838d3a5 100644
--- a/R/zzz.R
+++ b/R/zzz.R
@@ -25,7 +25,7 @@
 ask_to_switch_to_non_default_cache_root <- function(ask = interactive()) {
-  if (ask && stats::runif(1) > 0.9 && is.null(getOption("styler.cache_root"))) {
+  if (ask && stats::runif(1L) > 0.9 && is.null(getOption("styler.cache_root"))) {
     ask_to_switch_to_non_default_cache_root_impl()
     options(styler.cache_root = "styler")
   }
@@ -44,7 +44,7 @@ remove_old_cache_files <- function() {
     R.cache::getCachePath(c("styler", styler_version)),
     full.names = TRUE, recursive = TRUE
   )
-  date_boundary <- Sys.time() - 60 * 60 * 24 * 6
+  date_boundary <- Sys.time() - 60L * 60L * 24L * 6L
   file.remove(
     all_cached[file.info(all_cached)$mtime < date_boundary]
   )
@@ -54,9 +54,7 @@ remove_old_cache_files <- function() {
 remove_cache_old_versions <- function() {
   dirs <- list.dirs(R.cache::getCachePath("styler"), recursive = FALSE)
   old_package_dirs <- dirs[basename(dirs) != as.character(styler_version)]
-  purrr::walk(old_package_dirs, function(dir) {
-    unlink(dir, recursive = TRUE, force = TRUE)
-  })
+  purrr::walk(old_package_dirs, unlink, recursive = TRUE, force = TRUE)
 }
 
 # nocov end
diff --git a/inst/WORDLIST b/inst/WORDLIST
index b613a0c08..1cc6e08e5 100644
--- a/inst/WORDLIST
+++ b/inst/WORDLIST
@@ -138,6 +138,7 @@ netlify
 netReg
 nocomments
 NONINFRINGEMENT
+nonportable
 nph
 NUM
 oldrel
diff --git a/man/n_times_faster_with_cache.Rd b/man/n_times_faster_with_cache.Rd
index f80a5f47b..b79701c03 100644
--- a/man/n_times_faster_with_cache.Rd
+++ b/man/n_times_faster_with_cache.Rd
@@ -9,7 +9,7 @@ n_times_faster_with_cache(
   x2 = x1,
   ...,
   fun = styler::style_text,
-  n = 3,
+  n = 3L,
   clear = "always"
 )
 }
diff --git a/man/wrap_else_multiline_curly.Rd b/man/wrap_else_multiline_curly.Rd
index c6bce8e77..1a9534242 100644
--- a/man/wrap_else_multiline_curly.Rd
+++ b/man/wrap_else_multiline_curly.Rd
@@ -4,7 +4,7 @@
 \alias{wrap_else_multiline_curly}
 \title{Add curly braces to else}
 \usage{
-wrap_else_multiline_curly(pd, indent_by = 2, space_after = 0L)
+wrap_else_multiline_curly(pd, indent_by = 2L, space_after = 0L)
 }
 \arguments{
 \item{pd}{A parse table.}