From 5925f46a2ef03a26ed26359c07e0b6831133762f Mon Sep 17 00:00:00 2001 From: dieghernan Date: Mon, 4 Mar 2024 11:53:51 +0000 Subject: [PATCH 01/13] Review as_bibentry --- CITATION.cff | 4 +- DESCRIPTION | 2 +- R/as_bibentry.R | 486 +++++++++--------- R/as_cff_person.R | 8 + README.md | 67 +-- codemeta.json | 6 +- data-raw/crosswalk_tables.R | 29 +- data/cran_to_spdx.rda | Bin 916 -> 907 bytes inst/examples/CITATION_skeleton.cff | 6 +- inst/extdata/crosswalk_tables.csv | 1 + inst/schemaorg.json | 4 +- man/as_bibentry.Rd | 19 +- man/cff-class.Rd | 18 +- man/deprecated_cff_to_bib.Rd | 9 +- tests/testthat/_snaps/as_bibentry.md | 159 +++--- tests/testthat/_snaps/cff-methods.md | 40 +- .../testthat/_snaps/xtra-check-bibtex-ruby.md | 12 +- tests/testthat/test-as_bibentry.R | 109 ++-- vignettes/bibtex_cff.Rmd | 2 + 19 files changed, 498 insertions(+), 483 deletions(-) diff --git a/CITATION.cff b/CITATION.cff index f6be3074..f9ec74dd 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -1,5 +1,5 @@ # ----------------------------------------------------------- -# CITATION file created with {cffr} R package, v0.5.0.9000 +# CITATION file created with {cffr} R package, v0.99.0.9000 # See also: https://docs.ropensci.org/cffr/ # ----------------------------------------------------------- @@ -8,7 +8,7 @@ message: 'To cite package "cffr" in publications use:' type: software license: GPL-3.0-or-later title: 'cffr: Generate Citation File Format (''cff'') Metadata for R Packages' -version: 0.5.0.9000 +version: 0.99.0.9000 doi: 10.21105/joss.03900 abstract: The Citation File Format version 1.2.0 is a human and machine readable file format which provides citation metadata for diff --git a/DESCRIPTION b/DESCRIPTION index fd4613e1..d977faea 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,6 +1,6 @@ Package: cffr Title: Generate Citation File Format ('cff') Metadata for R Packages -Version: 0.5.0.9000 +Version: 0.99.0.9000 Authors@R: c( person("Diego", "Hernangómez", , "diego.hernangomezherrero@gmail.com", role = c("aut", "cre", "cph"), comment = c(ORCID = "0000-0001-8457-4658")), diff --git a/R/as_bibentry.R b/R/as_bibentry.R index 3274ad20..5dfba8aa 100644 --- a/R/as_bibentry.R +++ b/R/as_bibentry.R @@ -7,15 +7,15 @@ #' @description #' #' This function creates `bibentry` objects (see [utils::bibentry()]) from -#' different metadata sources (`cff` objects, `DESCRIPTION` files, etc.). Note -#' that a **R** `bibentry` object is the representation of a BibTeX entry, +#' different metadata sources ([`cff`] objects, `DESCRIPTION` files, etc.). +#' Note that a **R** `bibentry` object is the representation of a BibTeX entry, #' see **Examples** #' -#' The function tries to map the information of the source `x` into a `cff` +#' The function tries to map the information of the source `x` into a [`cff`] #' object and performs a mapping of the metadata to BibTeX, according to #' `vignette("bibtex_cff", "cffr")`. #' -#' The inverse transformation (`bibentry` object to `cff` reference) can +#' The inverse transformation (`bibentry` object to [`cff`] reference) can #' be done with the corresponding [as_cff()] method. #' #' @seealso @@ -34,13 +34,14 @@ #' . #' #' @param x The source that would be used for generating -#' the [bibentry()] object via `cff`. It could be: +#' the [bibentry()] object via \CRANpkg{cffr}. It could be: #' * A missing value. That would retrieve the DESCRIPTION #' file on your in-development package. 
-#' * An existing [`cff`] object, -#' * Path to a CITATION.cff file (`"*/CITATION.cff*"`), +#' * An existing `cff` object created with [cff()], [cff_create()] or +#' [as_cff()]. +#' * Path to a CITATION.cff file (`"CITATION.cff"`), #' * The name of an installed package (`"jsonlite"`), or -#' * Path to a DESCRIPTION file (`"*/DESCRIPTION*"`). +#' * Path to a DESCRIPTION file (`"DESCRIPTION"`). #' @param what Fields to extract. The value could be: #' - `preferred`: This would create a single entry with the main citation #' info of the package. @@ -52,7 +53,7 @@ #' @family coercing #' #' @return -#' `as_bibentry()` returns s `bibentry` object (or a list of `bibentry` +#' `as_bibentry()` returns a `bibentry` object (or a list of `bibentry` #' objects). #' #' @export @@ -170,218 +171,108 @@ make_bibentry <- function(x) { tobibentry <- list() - # No mapping needed (direct mapping) ---- - # edition institution journal month number pages publisher title volume year + # Direct mapping ---- - # Guess type of entry---- - - tobibentry$bibtype <- guess_bibtype(x) - # address---- - tobibentry$address <- guess_address(x) - - # author---- + ## From BibTeX ---- + ### author---- tobibentry$author <- as.person(x$authors) - - - # booktitle ---- - - # Only for incollections and inproceedings - if (tobibentry$bibtype %in% c("incollection", "inproceedings")) { - tobibentry$booktitle <- x[["collection-title"]] - } - - # Fallback to conference name - - if (all( - tobibentry$bibtype == "inproceedings", - is.null(tobibentry$booktitle) - )) { - tobibentry$booktitle <- x$conference$name - } - - # chapter---- + ### chapter---- tobibentry$chapter <- x$section - # edition---- + ### edition---- tobibentry$edition <- x$edition - # editor---- - # Same case than authors + ### editor---- tobibentry$editor <- as.person(x$editors) - # howpublished---- - tobibentry$howpublished <- make_howpublised(x) - - # institution/organization ---- - - # For inproceedings, proceedings and manual this field - # is organization - - if (tobibentry$bibtype %in% c( - "inproceedings", "proceedings", - "manual" - )) { - # Just name - tobibentry$organization <- x$institution$name - } else { - tobibentry$institution <- x$institution$name - } + ### howpublished---- + tobibentry$howpublished <- get_bib_howpublised(x) - - # Fallback for techreport, search on affiliation first author - if (tobibentry$bibtype == "techreport" && is.null(tobibentry$institution)) { - tobibentry$institution <- x$authors[[1]]$affiliation - } - - # journal---- + ### journal---- tobibentry$journal <- x$journal - # month---- - m <- x$month - - # Fallback - - if (is.null(m) && !is.null(x$`date-published`)) { - # Should be YYYY-MM-DD to be valid on cff, so - m <- as.integer(format(as.Date(x$`date-published`), "%m")) - } - - # Try to parse to 3 month string - m_int <- suppressWarnings(as.integer(m)) - m_letters <- clean_str(tolower(month.abb[m_int])) - - if (!is.null(m_letters)) { - tobibentry$month <- m_letters - } else { - tobibentry$month <- clean_str(m) - } - - + ### note ---- + tobibentry$note <- get_bib_note(x) - # note ---- - tobibentry$note <- x$notes + ### number---- + tobibentry$number <- clean_str(x[["issue"]]) - # unpublished needs a note - if (all(is.null(x$notes), tobibentry$bibtype == "unpublished")) { - tobibentry$note <- "Extracted with cffr R package" - } + ### pages ---- + tobibentry$pages <- clean_str( + paste(unique(c(x$start, x$end)), collapse = "--") + ) - # number---- + ### publisher ---- + tobibentry$publisher <- clean_str(x$publisher$name) + ### title ---- 
+ tobibentry$title <- x$title - tobibentry$number <- x[["issue"]] + ### volume---- + tobibentry$volume <- x$volume - # pages ---- + ## From BibLaTeX ---- - p <- unique(c(x$start, x$end)) + tobibentry$abstract <- x$abstract + tobibentry$date <- x$`date-published` + tobibentry$doi <- x$doi + tobibentry$file <- x$filename + tobibentry$isbn <- x$isbn + tobibentry$issn <- x$issn + tobibentry$issuetitle <- x$`issue-title` + tobibentry$keywords <- clean_str(paste0(unique(x$keywords), collapse = ",")) + tobibentry$pagetotal <- x$pages + tobibentry$translator <- toBibtex(as.person(x$translators)) + tobibentry$url <- x$url + tobibentry$urldate <- x$`date-accessed` + tobibentry$version <- x$version - if (!is.null(p)) tobibentry$pages <- paste(p, collapse = "--") - # publisher ---- - tobibentry$publisher <- x$publisher$name + # BibTeX entry---- - # school ---- - # In thesis - if (x$type == "thesis") { - tobibentry$school <- tobibentry$institution - tobibentry$institution <- NULL - } + tobibentry$bibtype <- guess_bibtype(x) + # address---- + tobibentry$address <- get_bib_address(x) - # series---- - if (is.null(tobibentry$booktitle)) { - tobibentry$series <- x$`collection-title` - } - # title ---- + # booktitle /series ---- + # Map cff collection-title + tobibentry <- c(tobibentry, get_bib_booktitle(x, tobibentry$bibtype)) - tobibentry$title <- x$title + # institution/organization/school ---- + # Map cff institution + tobibentry <- c(tobibentry, get_bib_inst_org(x, tobibentry$bibtype)) - # volume---- - tobibentry$volume <- x$volume + # month---- + tobibentry$month <- get_bib_month(x) # year ---- + tobibentry$year <- get_bib_year(x) - tobibentry$year <- x$year - # Fallback - - if (is.null(tobibentry$year) && !is.null(x$`date-released`)) { - # Should be YYYY-MM-DD to be valid on cff, so + # Handle anonymous author---- + # If anonymous coming from cff and not needed, then not use it - tobibentry$year <- substr(x$`date-released`, 1, 4) - } + is_anon <- identical(clean_str(x$authors[[1]]$name), "anonymous") + # If unknown remove from bib types that doesn't require it strictly - # Keywords - if (!is.null(x$keywords)) { - tobibentry$keywords <- paste(x$keywords, collapse = ", ") - } - # Guess inbook ---- - # inbook is a book where chapter or pages are present - - if (tobibentry$bibtype == "book" && !is.null( - c(tobibentry$chapter, tobibentry$pages) + if (all( + is_anon, + tobibentry$bibtype %in% c( + "booklet", "manual", "book", "inbook", + "misc", "proceedings" + ) )) { - tobibentry$bibtype <- "inbook" + tobibentry$author <- NULL } - - # key: First two given of author and year---- + # BibTeX key---- tobibentry$key <- make_bibkey(tobibentry) - - # Handle anonymous author---- - # If anonymous and not needed, then not use it - - - if (!is.null(x$authors[[1]]$name)) { - if (x$authors[[1]]$name == "anonymous" && - tobibentry$bibtype %in% c( - "booklet", "manual", "book", "inbook", - "misc", "proceedings" - )) { - tobibentry$author <- NULL - } - } - - # Add other interesting fields for BibLateX ---- - - tobibentry$abstract <- x$abstract - tobibentry$doi <- x$doi - tobibentry$date <- x$`date-published` - tobibentry$file <- x$filename - tobibentry$issuetitle <- x$`issue-title` - tobibentry$isbn <- x$isbn - tobibentry$issn <- x$issn - tobibentry$pagetotal <- x$pages - tobibentry$url <- x$url - tobibentry$urldate <- x$`date-accessed` - tobibentry$version <- x$version - # Translators - - trns <- x$translators - - trnsbib <- lapply(trns, function(y) { - if ("name" %in% names(y)) { - # Person protected on family - 
paste0("{", clean_str(y$name), "}") - } else { - fam <- clean_str(paste( - clean_str(y$`name-particle`), - clean_str(y$`family-names`) - )) - jr <- clean_str(y$`name-suffix`) - - given <- clean_str(y$`given-names`) - - paste(c(fam, jr, given), collapse = ", ") - } - }) - - tobibentry$translator <- paste(unlist(trnsbib), collapse = " and ") - - # sort ---- + # Final steps ---- + ## Sort ---- # based on default by # https://flamingtempura.github.io/bibtex-tidy/ tosort <- c( @@ -394,6 +285,7 @@ make_bibentry <- function(x) { sorted <- unique[unique %in% names(tobibentry)] tobibentry <- tobibentry[sorted] + ## Convert and catch errors ---- bib <- try(do.call(bibentry, tobibentry), silent = TRUE) # If key missing @@ -411,6 +303,29 @@ make_bibentry <- function(x) { return(bib) } +# Helpers to extract info ---- +get_bib_howpublised <- function(x) { + howpublished <- x$medium + + if (!is.null(howpublished)) { + f <- toupper(substr(howpublished, 0, 1)) + rest <- substr(howpublished, 2, nchar(howpublished)) + howpublished <- paste0(c(f, rest), collapse = "") + } + + clean_str(howpublished) +} + +get_bib_note <- function(x) { + note <- x$notes + + # unpublished needs a note + if (all(is.null(note), tolower(x$type) == "unpublished")) { + note <- "Extracted with cffr R package" + } + + clean_str(note) +} guess_bibtype <- function(x) { init_guess <- switch(tolower(x$type), @@ -430,33 +345,39 @@ guess_bibtype <- function(x) { "misc" ) + # Try guess Inbook ---- + # inbook is a book where chapter or pages are present + has_chapter <- !is.null(clean_str(x$section)) + has_pages <- !is.null( + clean_str(paste(unique(c(x$start, x$end)), collapse = "--")) + ) - # Try guess thesis - ttype <- clean_str(gsub("[[:punct:]]", "", - x$`thesis-type`, - perl = TRUE - )) + if (all(init_guess == "book", any(has_chapter, has_pages))) { + init_guess <- "inbook" + return(init_guess) + } - if (!is.null(ttype) && x$type == "thesis") { - if (grepl("Phd", ttype, ignore.case = TRUE)) { - init_guess <- "phdthesis" + # Try guess Phdthesis ---- + if (init_guess == "mastersthesis") { + ttype <- clean_str(gsub("[[:punct:]]", "", x$`thesis-type`, perl = TRUE)) + # phd + if (all(!is.null(ttype), grepl("phd", ttype, ignore.case = TRUE))) { + return("phdthesis") } } - # Check if it may be an incollection + # Try guess InCollection ---- # Hint: is misc with collection-title and publisher - - if (all( - init_guess == "misc", !is.null(x$`collection-title`), - !is.null(x$publisher), !is.null(x$year) - )) { - init_guess <- "incollection" + if (init_guess == "misc") { + if (!is.null(clean_str(x$`collection-title`))) { + return("incollection") + } } init_guess } -guess_address <- function(x) { +get_bib_address <- function(x) { # BibTeX 'address' is taken from the publisher (book, others) or the # conference (inproceedings). # Set logic: conference > institution > publisher @@ -468,16 +389,12 @@ guess_address <- function(x) { addr_search <- x$publisher } - - address <- clean_str(paste( - c( - addr_search$address, - addr_search$city, - addr_search$region, + address <- clean_str( + paste(c( + addr_search$address, addr_search$city, addr_search$region, addr_search$country - ), - collapse = ", " - )) + ), collapse = ", ") + ) # As a fallback, use also location if (is.null(address) && !is.null(x$location)) { @@ -487,53 +404,148 @@ guess_address <- function(x) { address } +get_bib_booktitle <- function(x, bibtype) { + # This map collection title. 
+ # If inproceedings, incollection to booktitle + # rest of cases to series + + book_series <- list() + tag_value <- clean_str(x[["collection-title"]]) + + + if (!bibtype %in% c("incollection", "inproceedings")) { + book_series$series <- tag_value + } else { + # Only for incollections and inproceedings map booktitle + book_series$booktitle <- tag_value + + # Fallback to conference name for inproceedings + if (all(bibtype == "inproceedings", is.null(tag_value))) { + book_series$booktitle <- clean_str(x$conference$name) + } + } + book_series +} + +get_bib_inst_org <- function(x, bibtype) { + # For inproceedings, proceedings and manual this field + # is organization + # For thesis it should be school + + inst_org <- list() + # Just name + inst_name <- clean_str(x$institution$name) + + if (bibtype %in% c("inproceedings", "proceedings", "manual")) { + inst_org$organization <- inst_name + } else if (grepl("thesis", bibtype, ignore.case = TRUE)) { + inst_org$school <- inst_name + } else { + inst_org$institution <- inst_name + } + + # Fallback for techreport, search on affiliation first author + + if (bibtype == "techreport" && is.null(inst_org$institution)) { + inst_org$institution <- clean_str(x$authors[[1]]$affiliation) + } + + inst_org +} make_bibkey <- function(tobibentry) { - # Bear in mind institutions has only given - # Use the first two authors - aut_sur <- lapply(tobibentry$author[1:2], function(z) { - unz <- unlist(z) + # Be kind and provided a bibentry key + + y <- tobibentry$year + + + # Init etall + etall <- NULL + + + # Also Some entries don't have authors, but editors + # Others may have none (misc, pamphlet) + + init_aut <- tobibentry$author + + # Try get editor in null + if (is.null(init_aut)) { + init_aut <- tobibentry$editor + } + + # If none then get a key based in title + if (any(is.null(init_aut), length(init_aut) == 0)) { + r <- tolower(tobibentry$title) + # Reduce lenght to 15 max + r <- substr(r, 0, 15) + } else { + # First name/surname and et_all if additional authors + # Bear in mind institutions has only given + + nauths <- length(init_aut) + if (nauths > 1) etall <- "_etall" + + + # Get info of first author + unz <- unlist(init_aut[1]) if ("family" %in% names(unz)) { r <- unz["family"] - return(clean_str(r)) + } else { + r <- unz["given"] } - r <- unz["given"] - return(clean_str(r)) - }) - - - aut_sur <- tolower(paste0(unlist(aut_sur), collapse = "")) - aut_sur <- gsub("\\s*", "", aut_sur) + r <- tolower(paste0(r, collapse = "")) + } # Try hard to remove accents # First with iconv - aut_sur <- iconv(aut_sur, - from = "UTF-8", to = "ASCII//TRANSLIT", - sub = "?" 
- ) + r <- iconv(r, from = "UTF-8", to = "ASCII//TRANSLIT", sub = "?") # Next to latex - aut_sur <- encoded_utf_to_latex(aut_sur) + r <- encoded_utf_to_latex(r) # Finally keep only a-z letters for key - aut_sur <- gsub("[^_a-z]", "", aut_sur) - - y <- tobibentry$year + r <- gsub("[^_a-z]", "", r) - key <- paste(c(aut_sur, y), collapse = ":") + # Append etall and year + key <- paste0(c(r, etall), collapse = "") + key <- paste(c(key, y), collapse = ":") key } -make_howpublised <- function(x) { - howpublished <- x$medium - if (!is.null(howpublished)) { - # Capitalize first letter - letts <- unlist(strsplit(howpublished, "|")) - howpublished <- - clean_str(paste0(c(toupper(letts[1]), letts[-1]), collapse = "")) +get_bib_month <- function(x) { + m <- x$month + + # Fallback + + if (is.null(m) && !is.null(x$`date-published`)) { + # Should be YYYY-MM-DD to be valid on cff, so + m <- as.integer(format(as.Date(x$`date-published`), "%m")) } - clean_str(howpublished) + # Try to parse to 3 month string + m_int <- suppressWarnings(as.integer(m)) + m_letters <- clean_str(tolower(month.abb[m_int])) + + if (!is.null(m_letters)) { + month <- m_letters + } else { + month <- clean_str(m) + } + + month +} + +get_bib_year <- function(x) { + year <- x$year + + # Fallback + + if (is.null(year) && !is.null(x$`date-released`)) { + # Should be YYYY-MM-DD to be valid on cff, so + year <- substr(x$`date-released`, 1, 4) + } + + clean_str(year) } diff --git a/R/as_cff_person.R b/R/as_cff_person.R index 60a17e0e..eb930040 100644 --- a/R/as_cff_person.R +++ b/R/as_cff_person.R @@ -470,3 +470,11 @@ extract_person_comments <- function(person) { fin_list } + +protect_bib_braces <- function(x) { + paste0("{", x, "}") +} + +cff_person_to_string <- function(x) { + +} diff --git a/README.md b/README.md index 2b438aed..530ec139 100644 --- a/README.md +++ b/README.md @@ -72,7 +72,7 @@ file and the `CITATION` file (if present) of your package. Note that **cffr** works best if your package pass `R CMD check/devtools::check()`. -As per 2024-03-03 there are at least 306 repos on GitHub using **cffr**. +As per 2024-03-04 there are at least 302 repos on GitHub using **cffr**. [Check them out here](https://github.com/search?q=cffr%20path%3A**%2FCITATION.cff&type=code). @@ -561,26 +561,6 @@ test <- cff_create("rmarkdown") - family-names: Chirico. 
given-names: Michael year: '2024' - - type: software - title: dygraphs - abstract: 'dygraphs: Interface to ''Dygraphs'' Interactive Time Series Charting - Library' - notes: Suggests - url: https://github.com/rstudio/dygraphs - repository: https://CRAN.R-project.org/package=dygraphs - authors: - - family-names: Vanderkam - given-names: Dan - website: http://dygraphs.com/ - - family-names: Allaire - given-names: JJ - - family-names: Owen - given-names: Jonathan - - family-names: Gromer - given-names: Daniel - - family-names: Thieurmel - given-names: Benoit - year: '2024' - type: software title: fs abstract: 'fs: Cross-Platform File System Operations Based on ''libuv''' @@ -597,26 +577,6 @@ test <- cff_create("rmarkdown") given-names: Gábor email: csardi.gabor@gmail.com year: '2024' - - type: software - title: rsconnect - abstract: 'rsconnect: Deploy Docs, Apps, and APIs to ''Posit Connect'', ''shinyapps.io'', - and ''RPubs''' - notes: Suggests - url: https://rstudio.github.io/rsconnect/ - repository: https://CRAN.R-project.org/package=rsconnect - authors: - - family-names: Atkins - given-names: Aron - email: aron@posit.co - - family-names: Allen - given-names: Toph - - family-names: Wickham - given-names: Hadley - - family-names: McPherson - given-names: Jonathan - - family-names: Allaire - given-names: JJ - year: '2024' - type: software title: downlit abstract: 'downlit: Syntax Highlighting and Automatic Linking' @@ -629,19 +589,6 @@ test <- cff_create("rmarkdown") email: hadley@posit.co year: '2024' version: '>= 0.4.0' - - type: software - title: katex - abstract: 'katex: Rendering Math to HTML, ''MathML'', or R-Documentation Format' - notes: Suggests - url: https://docs.ropensci.org/katex/ - repository: https://CRAN.R-project.org/package=katex - authors: - - family-names: Ooms - given-names: Jeroen - email: jeroen@berkeley.edu - orcid: https://orcid.org/0000-0002-4035-0289 - year: '2024' - version: '>= 1.4.0' - type: software title: sass abstract: 'sass: Syntactically Awesome Style Sheets (''Sass'')' @@ -752,18 +699,6 @@ test <- cff_create("rmarkdown") given-names: Davis email: davis@posit.co year: '2024' - - type: software - title: cleanrmd - abstract: 'cleanrmd: Clean Class-Less ''R Markdown'' HTML Documents' - notes: Suggests - url: https://pkg.garrickadenbuie.com/cleanrmd/ - repository: https://CRAN.R-project.org/package=cleanrmd - authors: - - family-names: Aden-Buie - given-names: Garrick - email: garrick@adenbuie.com - orcid: https://orcid.org/0000-0002-7111-0077 - year: '2024' - type: software title: withr abstract: 'withr: Run Code ''With'' Temporarily Modified Global State' diff --git a/codemeta.json b/codemeta.json index 8ba3d173..ba3907f6 100644 --- a/codemeta.json +++ b/codemeta.json @@ -8,13 +8,13 @@ "codeRepository": "https://github.com/ropensci/cffr", "issueTracker": "https://github.com/ropensci/cffr/issues", "license": "https://spdx.org/licenses/GPL-3.0", - "version": "0.5.0.9000", + "version": "0.99.0.9000", "programmingLanguage": { "@type": "ComputerLanguage", "name": "R", "url": "https://r-project.org" }, - "runtimePlatform": "R version 4.3.2 (2023-10-31 ucrt)", + "runtimePlatform": "R version 4.3.2 (2023-10-31)", "provider": { "@id": "https://cran.r-project.org", "@type": "Organization", @@ -200,7 +200,7 @@ }, "isPartOf": "https://ropensci.org", "keywords": ["attribution", "citation", "credit", "citation-files", "cff", "metadata", "r", "r-package", "citation-file-format", "rstats", "ropensci", "cran"], - "fileSize": "961.96KB", + "fileSize": "947.426KB", "citation": [ 
{ "@type": "ScholarlyArticle", diff --git a/data-raw/crosswalk_tables.R b/data-raw/crosswalk_tables.R index b2199230..ca649f66 100644 --- a/data-raw/crosswalk_tables.R +++ b/data-raw/crosswalk_tables.R @@ -1,17 +1,20 @@ -## code to prepare `crosswalk_tables.csv` -library(tidyverse) -df <- openxlsx::read.xlsx("data-raw/crosswalk_tables.xlsx") - -unlink("/inst/extdata/crosswalk_tables.csv") -lapply(df, trimws) %>% - bind_rows() %>% - setNames(names(df)) %>% - write.csv("./inst/extdata/crosswalk_tables.csv", row.names = FALSE) - -cli::cli_alert_success("Excel updated") - +# library(tidyverse) +# df <- openxlsx::read.xlsx("data-raw/crosswalk_tables.xlsx") # +# unlink("/inst/extdata/crosswalk_tables.csv") # lapply(df, trimws) %>% # bind_rows() %>% # setNames(names(df)) %>% -# openxlsx::write.xlsx("data-raw/crosswalk_tables.xlsx", overwrite = TRUE) +# write.csv("./inst/extdata/crosswalk_tables.csv", row.names = FALSE) +# +# cli::cli_alert_success("Excel updated") +# +# # +# # lapply(df, trimws) %>% +# # bind_rows() %>% +# # setNames(names(df)) %>% +# # openxlsx::write.xlsx("data-raw/crosswalk_tables.xlsx", overwrite = TRUE) + +library(tidyverse) + +tb <- read_csv("./inst/extdata/crosswalk_tables.csv") diff --git a/data/cran_to_spdx.rda b/data/cran_to_spdx.rda index 0bd1435cae708e762b962cb7a97ec9ae0c12fef9..33fae2994ac0070f09336d3d8c403ec099798a1f 100644 GIT binary patch literal 907 zcmV;619bdCT4*^jL0KkKS>9zRc>o3)f6)K`|B!TNe+NIOUueH)-|#>H0ssI3&;$PH zMJcLQXdxn$LqY0#n*AnjWXMA%O-!8VroodWVq>8hVWZpyrJl8ess_0%@QOhK3*w7>x`OkO2~jFe$2d zXwawXnjVu)lzN&O4WcqVOllbqQxUPLFe>dz=&r?fZ9@1WKjajLMimfmJ2mUYPPPXx zj*Ybg1&%$?qNxPUhn5dz<$F-rN4=W12TeQiUlnt8<0b6Yj7qp-b#>kRV}V z`Fj%x=Ccapm4j`8K(v)wkqAVgQwwED3edK<)|qCdvuV!(PmL8wD1-^BC2K`h&j3QP zL0u36=!%xXc0(x;6=KLKJiY@Gylf7R8cZa*UN&lz^7{|Bd%yVmS_cO2<5+P%UUOvX zSz@tbnf*NMPJ>Nh!Uq@-^UcwV*1cylE@5sg3T+_5-MXcG?wWWD;2vA3o z&8+4_l-WR}LCwk=!sx_DBpAAMWvtJbNN+NfJb=(2mY@It literal 916 zcmV;F18e*riwFP!000002F+JXPuoBccH=-unm|gSm3lH&4Vna`W6qYoKWTU7l z6%~ibghjRFILLPBCXZ^{hliR@wmg?9cUwC}dynSrJ>vGR3u>%N zH7^9s2jmCN)MFU+jvqQf=!^lyHV7uMxN$r@(R46&?O7nkAL|Ucu8wsheU#`}%-}Kg zct}p6f|sD3J$Vg6?f4@&g<#mx_GkbE&SM;}8efM{{dWsb-+YHfc*I(skDzQMQuc9 z1!Aohx0C}Wz@61{kAy z&Yir|26bnZ!6Z%^)E>i>qx;N-Fqt-QvgfmsNZIvBwAm0ab2)67k|eIP;A`!G#>mpM z^iZ=B9+QksbB1dyET}lE|DM>xOF{e0ntpR!*DkoCa;-+&L=l6|gqH=3{IpT6Ecq3S zYeuoij3tXyK2yb3u8OVv0aVXC*O@pWx)Nr+Lh>6AcYHg*)N{XbpuvMrDCh&%#!Z-b zl8jZht*40o7ZBOVmSVVL106tdg;alC4R?J;u`wz#?3 zjfZj`(dQt-@SQK_UFh<3UL@mGl-u{nq~C0?X$Sm-txHN&hBcFxl`GI@LE+XjzF=3V z{08ZT_c^er!9v~w*h7PCW`0WWB0K*ZUKH0qz)3MkD`ZhnY^`QN@PAw(38+v}F}WMf z%vENY4z3n27R)i3_&a+GMMZe~UGnS+3epi+%Hr$8?&+{g{tFEUC+Ffm{5BSleFUss q*`d9A?PHSyz3NXt?~+F~qDOtlUw3zRKk(1b=<^FCJhi~74*&oy4!s@# diff --git a/inst/examples/CITATION_skeleton.cff b/inst/examples/CITATION_skeleton.cff index 5843065d..5b94b021 100644 --- a/inst/examples/CITATION_skeleton.cff +++ b/inst/examples/CITATION_skeleton.cff @@ -1,8 +1,8 @@ # A minimal CFF file with only the required fields included. -authors: - - family-names: Doe - given-names: John cff-version: 1.2.0 message: "If you use this software, please cite it using these metadata." 
title: "My Research Software" +authors: + - family-names: Doe + given-names: John diff --git a/inst/extdata/crosswalk_tables.csv b/inst/extdata/crosswalk_tables.csv index 155c598f..97aee6ce 100644 --- a/inst/extdata/crosswalk_tables.csv +++ b/inst/extdata/crosswalk_tables.csv @@ -75,6 +75,7 @@ "fields_biblatex2cff","**isbn**","[isbn]{.underline}",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "fields_biblatex2cff","**issn**","[issn]{.underline}",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "fields_biblatex2cff","**issuetitle**","[issue-title]{.underline}",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA +"fields_biblatex2cff","**keywords**","[keywords]{.underline}",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "fields_biblatex2cff","**pagetotal**","[pages]{.underline}",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "fields_biblatex2cff","**translator**","[translators]{.underline}",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "fields_biblatex2cff","**url**","[url]{.underline}",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA diff --git a/inst/schemaorg.json b/inst/schemaorg.json index dec7a9c6..015b552c 100644 --- a/inst/schemaorg.json +++ b/inst/schemaorg.json @@ -26,6 +26,6 @@ "name": "Comprehensive R Archive Network (CRAN)", "url": "https://cran.r-project.org" }, - "runtimePlatform": "R version 4.3.2 (2023-10-31 ucrt)", - "version": "0.5.0.9000" + "runtimePlatform": "R version 4.3.2 (2023-10-31)", + "version": "0.99.0.9000" } diff --git a/man/as_bibentry.Rd b/man/as_bibentry.Rd index 162367b4..70106729 100644 --- a/man/as_bibentry.Rd +++ b/man/as_bibentry.Rd @@ -11,14 +11,15 @@ as_bibentry(x, what = c("preferred", "references", "all")) } \arguments{ \item{x}{The source that would be used for generating -the \code{\link[=bibentry]{bibentry()}} object via \code{cff}. It could be: +the \code{\link[=bibentry]{bibentry()}} object via \CRANpkg{cffr}. It could be: \itemize{ \item A missing value. That would retrieve the DESCRIPTION file on your in-development package. -\item An existing \code{\link{cff}} object, -\item Path to a CITATION.cff file (\code{"*/CITATION.cff*"}), +\item An existing \code{cff} object created with \code{\link[=cff]{cff()}}, \code{\link[=cff_create]{cff_create()}} or +\code{\link[=as_cff]{as_cff()}}. +\item Path to a CITATION.cff file (\code{"CITATION.cff"}), \item The name of an installed package (\code{"jsonlite"}), or -\item Path to a DESCRIPTION file (\code{"*/DESCRIPTION*"}). +\item Path to a DESCRIPTION file (\code{"DESCRIPTION"}). }} \item{what}{Fields to extract. The value could be: @@ -35,7 +36,7 @@ both the preferred citation info and the references. \item{...}{Arguments passed to \code{\link[utils:toLatex]{utils::toBibtex()}}.} } \value{ -\code{as_bibentry()} returns s \code{bibentry} object (or a list of \code{bibentry} +\code{as_bibentry()} returns a \code{bibentry} object (or a list of \code{bibentry} objects). \code{toBibtex.cff()} returns a \code{Bibtex} object that can be printed as BibTeX @@ -43,15 +44,15 @@ markup. } \description{ This function creates \code{bibentry} objects (see \code{\link[utils:bibentry]{utils::bibentry()}}) from -different metadata sources (\code{cff} objects, \code{DESCRIPTION} files, etc.). Note -that a \strong{R} \code{bibentry} object is the representation of a BibTeX entry, +different metadata sources (\code{\link{cff}} objects, \code{DESCRIPTION} files, etc.). 
+Note that a \strong{R} \code{bibentry} object is the representation of a BibTeX entry, see \strong{Examples} -The function tries to map the information of the source \code{x} into a \code{cff} +The function tries to map the information of the source \code{x} into a \code{\link{cff}} object and performs a mapping of the metadata to BibTeX, according to \code{vignette("bibtex_cff", "cffr")}. -The inverse transformation (\code{bibentry} object to \code{cff} reference) can +The inverse transformation (\code{bibentry} object to \code{\link{cff}} reference) can be done with the corresponding \code{\link[=as_cff]{as_cff()}} method. Additionally, it is also provided a method for \code{\link[=toBibtex]{toBibtex()}}, that can diff --git a/man/cff-class.Rd b/man/cff-class.Rd index 602b2020..4c923674 100644 --- a/man/cff-class.Rd +++ b/man/cff-class.Rd @@ -93,12 +93,12 @@ comply with the validation rules of the CFF specification. \if{html}{\out{
}}\preformatted{minimal_cff <- cff() minimal_cff -#> authors: -#> - family-names: Doe -#> given-names: John #> cff-version: 1.2.0 #> message: If you use this software, please cite it using these metadata. #> title: My Research Software +#> authors: +#> - family-names: Doe +#> given-names: John as_df <- as.data.frame(minimal_cff) @@ -107,11 +107,11 @@ class(as_df) t(as_df) #> [,1] -#> authors.00.family_names "Doe" -#> authors.00.given_names "John" #> cff_version "1.2.0" #> message "If you use this software, please cite it using these metadata." -#> title "My Research Software" +#> title "My Research Software" +#> authors.00.family_names "Doe" +#> authors.00.given_names "John" }\if{html}{\out{
}} } @@ -122,12 +122,12 @@ t(as_df) \if{html}{\out{
}}\preformatted{new_keys <- c("date-released" = "2020-01-31", abstract = "Minimal example") c(minimal_cff, new_keys) -#> authors: -#> - family-names: Doe -#> given-names: John #> cff-version: 1.2.0 #> message: If you use this software, please cite it using these metadata. #> title: My Research Software +#> authors: +#> - family-names: Doe +#> given-names: John #> date-released: '2020-01-31' #> abstract: Minimal example }\if{html}{\out{
}} diff --git a/man/deprecated_cff_to_bib.Rd b/man/deprecated_cff_to_bib.Rd index 3d6cca79..1cff5cd4 100644 --- a/man/deprecated_cff_to_bib.Rd +++ b/man/deprecated_cff_to_bib.Rd @@ -11,14 +11,15 @@ cff_to_bibtex(x, what = c("preferred", "references", "all")) } \arguments{ \item{x}{The source that would be used for generating -the \code{\link[=bibentry]{bibentry()}} object via \code{cff}. It could be: +the \code{\link[=bibentry]{bibentry()}} object via \CRANpkg{cffr}. It could be: \itemize{ \item A missing value. That would retrieve the DESCRIPTION file on your in-development package. -\item An existing \code{\link{cff}} object, -\item Path to a CITATION.cff file (\code{"*/CITATION.cff*"}), +\item An existing \code{cff} object created with \code{\link[=cff]{cff()}}, \code{\link[=cff_create]{cff_create()}} or +\code{\link[=as_cff]{as_cff()}}. +\item Path to a CITATION.cff file (\code{"CITATION.cff"}), \item The name of an installed package (\code{"jsonlite"}), or -\item Path to a DESCRIPTION file (\code{"*/DESCRIPTION*"}). +\item Path to a DESCRIPTION file (\code{"DESCRIPTION"}). }} \item{what}{Fields to extract. The value could be: diff --git a/tests/testthat/_snaps/as_bibentry.md b/tests/testthat/_snaps/as_bibentry.md index 6bb63ce4..b0e779a9 100644 --- a/tests/testthat/_snaps/as_bibentry.md +++ b/tests/testthat/_snaps/as_bibentry.md @@ -29,7 +29,7 @@ volume = {27}, number = {2}, pages = {97--111}, - keywords = {Some, simple, keywords}, + keywords = {Some,simple,keywords}, } # Book to bibtex @@ -58,7 +58,7 @@ Code toBibtex(bib) Output - @Book{mittelbachgossens:2004, + @Book{mittelbach_etall:2004, title = {The LaTeX Companion}, author = {Frank Mittelbach and Michel Gossens and Johannes Braams and David Carlisle and Chris Rowley}, year = {2004}, @@ -71,7 +71,7 @@ number = {7}, note = {Example modified for testing purposes}, edition = {Fourth}, - keywords = {Two, keyword}, + keywords = {Two,keyword}, } # Booklet to bibtex @@ -136,7 +136,7 @@ Code toBibtex(bib) Output - @InBook{delemosgacek:2003, + @InBook{delemos_etall:2003, title = {Architectural Mismatch Tolerance}, author = {R. {de Lemos} and C. Gacek and A. 
Romanovsky}, year = {2003}, @@ -212,7 +212,7 @@ Code toBibtex(bib) Output - @InProceedings{aberdeenbayer:1999, + @InProceedings{aberdeen_etall:1999, title = {Implementing Practical Dialogue Systems with the DARPA Communicator Architecture}, author = {John Aberdeen and Samuel Bayer and Sasha Caskey and Laurie Damianos and Alan Goldschen and Lynette Hirschman and Dan Loehr and Hugo Trapper}, year = {1999}, @@ -229,7 +229,7 @@ Code toBibtex(bib) Output - @InProceedings{aberdeenbayer:1999, + @InProceedings{aberdeen_etall:1999, title = {Implementing Practical Dialogue Systems with the DARPA Communicator Architecture}, author = {John Aberdeen and Samuel Bayer and Sasha Caskey and Laurie Damianos and Alan Goldschen and Lynette Hirschman and Dan Loehr and Hugo Trapper}, year = {1999}, @@ -260,7 +260,7 @@ Code toBibtex(bib) Output - @Manual{allweinbarkerplummer:1999, + @Manual{allwein_etall:1999, title = {LPL Software Manual}, author = {Gerhard Allwein and Dave Barker-Plummer and Jon Barwise and John Etchemendy}, year = {1999}, @@ -347,7 +347,7 @@ Code toBibtex(bib) Output - @Proceedings{anonymous:1996, + @Proceedings{alferes_etall:1996, title = {An Abductive Framework for Negation in Disjunctive Logic Programming}, year = {1996}, publisher = {Springer-Verlag}, @@ -384,6 +384,19 @@ institution = {Intelligent Systems Program, University of Pittsburgh}, } +--- + + Code + toBibtex(bib) + Output + @TechReport{aronis:1993, + title = {Implementing Inheritance on the Connection Machine}, + author = {John M. Aronis}, + year = {1993}, + number = {ISP 93-1}, + institution = {rOpenSci}, + } + # Unpublished to bibtex Code @@ -402,7 +415,7 @@ Code toBibtex(bib) Output - @Unpublished{aronisprovost:1959, + @Unpublished{aronis_etall:1959, title = {Efficiently Constructing Relational Features from Background}, author = {John M. Aronis and Foster J. Provost}, year = {1959}, @@ -414,13 +427,67 @@ Code toBibtex(bib) Output - @Unpublished{aronisprovost:1959, + @Unpublished{aronis_etall:1959, title = {Efficiently Constructing Relational Features from Background}, author = {John M. Aronis and Foster J. Provost}, year = {1959}, note = {Extracted with cffr R package}, } +# Test BibLateX entry + + Code + toBibtex(bib) + Output + @Article{, + author = {M. A. Kastenholz and Philippe H. Hünenbergerb}, + title = {Computation of methodology hyphen independent ionic solvation + free energies from molecular simulations}, + journal = {J. Chem. Phys.}, + year = {2006}, + note = {Example modified for testing purposes}, + pages = {55--65}, + date = {2006-03-15}, + file = {a_file.pdf}, + issuetitle = {Semantic {3D} Media and Content}, + translator = {Wicksteed, P. H. and {The translator factory}}, + urldate = {2006-10-01}, + pagetotal = {528}, + abstract = {The computation of ionic solvation free energies from + atomistic simulations is a surprisingly difficult problem that + has found no satisfactory solution for more than 15 years.}, + doi = {10.1063/1.2172593}, + isbn = {0-816-52066-6}, + issn = {0097-8493}, + url = {http://www.ctan.org}, + } + +--- + + Code + toBibtex(parsed) + Output + @Article{kastenholz_etall:2006, + title = {Computation of methodology hyphen independent ionic solvation free energies from molecular simulations}, + author = {M. A. Kastenholz and Philippe H. Hünenbergerb}, + year = {2006}, + month = {mar}, + journal = {J. Chem. 
Phys.}, + pages = {55--65}, + doi = {10.1063/1.2172593}, + isbn = {0-816-52066-6}, + issn = {0097-8493}, + url = {http://www.ctan.org}, + note = {Example modified for testing purposes}, + abstract = {The computation of ionic solvation free energies from atomistic simulations is a surprisingly difficult problem that has found no satisfactory solution for more than 15 years.}, + date = {2006-03-15}, + file = {a_file.pdf}, + issuetitle = {Semantic 3D Media and Content}, + pagetotal = {528}, + translator = {Wicksteed, P. H. and {The translator factory}}, + urldate = {2006-10-01}, + } + # particle names Code @@ -444,7 +511,7 @@ Code toBibtex(bib) Output - @Book{vanleunendavisjr:1979, + @Book{vanleunen_etall:1979, title = {A Handbook for Scholars}, author = {Mary-Claire {van Leunen} and Sammy {Davis Jr.}}, year = {1979}, @@ -479,7 +546,7 @@ Code toBibtex(bib) Output - @InBook{vanderrealpersoniventityprojectteamconferenceentity:2017, + @InBook{vanderrealpersoniv_etall:2017, title = {Book Title}, author = {One Truly {van der Real Person IV} and {Entity Project Team Conference entity}}, year = {2017}, @@ -500,16 +567,16 @@ chapter = {Chapter 2 - "Reference keys"}, edition = {2nd edition}, howpublished = {Hardcover book}, - institution = {Entity Project Team Conference entity}, - keywords = {Software, Citation}, abstract = {Description of the book.}, date = {2017-10-31}, file = {book.zip}, issuetitle = {Special Issue on Software Citation}, + keywords = {Software,Citation}, pagetotal = {765}, + translator = {van der Real Person IV, One Truly and {Entity Project Team Conference entity}}, urldate = {2017-10-31}, version = {0.0.1423-BETA}, - translator = {van der Real Person, IV, One Truly and {Entity Project Team Conference entity}}, + institution = {Entity Project Team Conference entity}, } # Test anonymous @@ -517,7 +584,7 @@ Code toBibtex(back) Output - @Booklet{anonymous, + @Booklet{abooklet, title = {A booklet}, } @@ -526,7 +593,7 @@ Code toBibtex(back) Output - @Manual{anonymous, + @Manual{amanual, title = {A manual}, } @@ -535,7 +602,7 @@ Code toBibtex(back) Output - @Misc{anonymous, + @Misc{amisc, title = {A misc}, } @@ -544,7 +611,7 @@ Code toBibtex(back) Output - @Proceedings{anonymous:1984, + @Proceedings{proceedings:1984, title = {proceedings}, year = {1984}, } @@ -576,60 +643,6 @@ date = {1678-04-23}, } -# Test BibLateX entry - - Code - toBibtex(bib) - Output - @Article{, - author = {M. A. Kastenholz and Philippe H. Hünenbergerb}, - title = {Computation of methodology hyphen independent ionic solvation - free energies from molecular simulations}, - journal = {J. Chem. Phys.}, - year = {2006}, - note = {Example modified for testing purposes}, - pages = {55--65}, - date = {2006-03-15}, - file = {a_file.pdf}, - issuetitle = {Semantic {3D} Media and Content}, - translator = {Wicksteed, P. H. and {The translator factory}}, - urldate = {2006-10-01}, - pagetotal = {528}, - abstract = {The computation of ionic solvation free energies from - atomistic simulations is a surprisingly difficult problem that - has found no satisfactory solution for more than 15 years.}, - doi = {10.1063/1.2172593}, - isbn = {0-816-52066-6}, - issn = {0097-8493}, - url = {http://www.ctan.org}, - } - ---- - - Code - toBibtex(parsed) - Output - @Article{kastenholzhunenbergerb:2006, - title = {Computation of methodology hyphen independent ionic solvation free energies from molecular simulations}, - author = {M. A. Kastenholz and Philippe H. Hünenbergerb}, - year = {2006}, - month = {mar}, - journal = {J. Chem. 
Phys.}, - pages = {55--65}, - doi = {10.1063/1.2172593}, - isbn = {0-816-52066-6}, - issn = {0097-8493}, - url = {http://www.ctan.org}, - note = {Example modified for testing purposes}, - abstract = {The computation of ionic solvation free energies from atomistic simulations is a surprisingly difficult problem that has found no satisfactory solution for more than 15 years.}, - date = {2006-03-15}, - file = {a_file.pdf}, - issuetitle = {Semantic 3D Media and Content}, - pagetotal = {528}, - urldate = {2006-10-01}, - translator = {Wicksteed, P. H. and {The translator factory}}, - } - # Test Fallback year Code diff --git a/tests/testthat/_snaps/cff-methods.md b/tests/testthat/_snaps/cff-methods.md index 15a36553..8de91935 100644 --- a/tests/testthat/_snaps/cff-methods.md +++ b/tests/testthat/_snaps/cff-methods.md @@ -814,15 +814,15 @@ [14] "references.00.authors.03.name" [15] "references.00.authors.03.date_end" [16] "references.00.url" - [17] "references.00.keywords.00" - [18] "references.00.keywords.01" - [19] "references.00.keywords.02" - [20] "references.00.keywords.03" - [21] "references.00.keywords.04" - [22] "references.00.keywords.05" - [23] "references.00.keywords.06" - [24] "references.00.keywords.07" - [25] "references.00.abstract" + [17] "references.00.abstract" + [18] "references.00.keywords.00" + [19] "references.00.keywords.01" + [20] "references.00.keywords.02" + [21] "references.00.keywords.03" + [22] "references.00.keywords.04" + [23] "references.00.keywords.05" + [24] "references.00.keywords.06" + [25] "references.00.keywords.07" [26] "references.00.version" # Convert authors only @@ -927,37 +927,37 @@ Code a_cff Output - authors: - - family-names: Doe - given-names: John cff-version: 1.2.0 message: If you use this software, please cite it using these metadata. title: My Research Software + authors: + - family-names: Doe + given-names: John --- Code head(a_cff, 2) Output - authors: - - family-names: Doe - given-names: John cff-version: 1.2.0 + message: If you use this software, please cite it using these metadata. --- Code tail(a_cff, 2) Output - message: If you use this software, please cite it using these metadata. title: My Research Software + authors: + - family-names: Doe + given-names: John # toBibtex Code toBibtex(full_cff) Output - @Misc{druskatspaaks:2021, + @Misc{druskat_etall:2021, title = {Citation File Format}, author = {Stephan Druskat and Jurriaan H. Spaaks and Neil {Chue Hong} and Robert Haines and James Baker and Spencer Bliven and Egon Willighagen and David Pérez-Suárez and Alexander Konovalov}, year = {2021}, @@ -987,7 +987,7 @@ note = {Publisher: The Open Journal}, } - @Article{boettigerchamberlain:2016, + @Article{boettiger_etall:2016, title = {RNeXML: A Package for Reading and Writing Richly Annotated Phylogenetic, Character, and Trait Data in R}, author = {Carl Boettiger and Scott Chamberlain and Rutger Vos and Hilmar Lapp}, year = {2016}, @@ -1011,7 +1011,7 @@ Code toBibtex(full_cff, what = "all") Output - @Misc{druskatspaaks:2021, + @Misc{druskat_etall:2021, title = {Citation File Format}, author = {Stephan Druskat and Jurriaan H. 
Spaaks and Neil {Chue Hong} and Robert Haines and James Baker and Spencer Bliven and Egon Willighagen and David Pérez-Suárez and Alexander Konovalov}, year = {2021}, @@ -1036,7 +1036,7 @@ note = {Publisher: The Open Journal}, } - @Article{boettigerchamberlain:2016, + @Article{boettiger_etall:2016, title = {RNeXML: A Package for Reading and Writing Richly Annotated Phylogenetic, Character, and Trait Data in R}, author = {Carl Boettiger and Scott Chamberlain and Rutger Vos and Hilmar Lapp}, year = {2016}, diff --git a/tests/testthat/_snaps/xtra-check-bibtex-ruby.md b/tests/testthat/_snaps/xtra-check-bibtex-ruby.md index 4e11e51f..408e81ee 100644 --- a/tests/testthat/_snaps/xtra-check-bibtex-ruby.md +++ b/tests/testthat/_snaps/xtra-check-bibtex-ruby.md @@ -29,7 +29,7 @@ Code toBibtex(bib) Output - @InProceedings{gamblinlegendre:2016, + @InProceedings{gamblin_etall:2016, title = {The Spack Package Manager: Bringing Order to HPC Software Chaos}, author = {Todd Gamblin and Matthew LeGendre and Michael R. Collette and Gregory L. Lee and Adam Moody and Bronis R. {de Supinski} and Scott Futral}, year = {2016}, @@ -45,7 +45,7 @@ Code toBibtex(bib) Output - @InProceedings{gamblinlegendre:2015, + @InProceedings{gamblin_etall:2015, title = {The Spack Package Manager: Bringing Order to HPC Software Chaos}, author = {Todd Gamblin and Matthew LeGendre and Michael R. Collette and Gregory L. Lee and Adam Moody and Bronis R. {de Supinski} and Scott Futral}, year = {2015}, @@ -59,7 +59,7 @@ Code toBibtex(bib) Output - @InProceedings{rampinfreire:2016, + @InProceedings{rampin_etall:2016, title = {ReproZip: Computational Reproducibility With Ease}, author = {Rémi Rampin and Juliana Freire and Fernando Chirigati and Dennis Shasha}, year = {2016}, @@ -102,7 +102,7 @@ Code toBibtex(bib) Output - @Article{hartmannwong:2020, + @Article{hartmann_etall:2020, title = {An image-based data-driven analysis of cellular architecture in a developing tissue}, author = {Jonas Hartmann and Mie Wong and Elisa Gallo and Darren Gilmour}, year = {2020}, @@ -176,7 +176,7 @@ Code toBibtex(bib) Output - @Proceedings{rampinfreire:2016, + @Proceedings{rampin_etall:2016, title = {ReproZip: Computational Reproducibility With Ease}, author = {Remi Rampin and Juliana Freire and Fernando Chirigati and Dennis Shasha}, year = {2016}, @@ -192,7 +192,7 @@ Code toBibtex(bib) Output - @Article{smithkatz:2016, + @Article{smith_etall:2016, title = {Software citation principles}, author = {A. M. Smith and D. S. Katz and K. E. Niemeyer and {FORCE11 Software Citation Working Group}}, year = {2016}, diff --git a/tests/testthat/test-as_bibentry.R b/tests/testthat/test-as_bibentry.R index 0baf3dc6..c95b961f 100644 --- a/tests/testthat/test-as_bibentry.R +++ b/tests/testthat/test-as_bibentry.R @@ -221,7 +221,9 @@ test_that("Proceedings to bibtex", { test_that("TechReport to bibtex", { bib <- bibentry("TechReport", - author = "John M. 
Aronis", + author = person("John M.", "Aronis", + comment = c(affiliation = "rOpenSci") + ), title = "Implementing Inheritance on the Connection Machine", institution = "Intelligent Systems Program, University of Pittsburgh", number = "ISP 93-1", @@ -233,6 +235,12 @@ test_that("TechReport to bibtex", { bibparsed <- as_cff(bib) bib <- as_bibentry(bibparsed) expect_snapshot(toBibtex(bib)) + + # Fallback when missing institution + bibparsed[[1]]$institution <- NULL + + bib <- as_bibentry(bibparsed) + expect_snapshot(toBibtex(bib)) }) test_that("Unpublished to bibtex", { @@ -258,6 +266,40 @@ test_that("Unpublished to bibtex", { expect_snapshot(toBibtex(bib)) }) +test_that("Test BibLateX entry", { + bib <- bibentry("Article", + author = "M. A. Kastenholz, and Philippe H. Hünenbergerb", + title = "Computation of methodology hyphen independent ionic solvation + free energies from molecular simulations", + journal = "J. Chem. Phys.", + year = 2006, + note = "Example modified for testing purposes", + pages = "55--65", + + # Additional BibLatex Fields + date = "2006-03-15", + file = "a_file.pdf", + issuetitle = "Semantic {3D} Media and Content", + translator = "Wicksteed, P. H. and {The translator factory}", + urldate = "2006-10-01", + pagetotal = 528, + abstract = "The computation of ionic solvation free energies from + atomistic simulations is a surprisingly difficult problem that + has found no satisfactory solution for more than 15 years.", + doi = "10.1063/1.2172593", + isbn = "0-816-52066-6", + issn = "0097-8493", + url = "http://www.ctan.org" + ) + expect_snapshot(toBibtex(bib)) + x <- as_cff(bib) + + + parsed <- as_bibentry(x) + expect_snapshot(toBibtex(parsed)) +}) + + # Other testers ---- test_that("particle names", { @@ -379,40 +421,6 @@ test_that("Fallback month", { }) -test_that("Test BibLateX entry", { - bib <- bibentry("Article", - author = "M. A. Kastenholz, and Philippe H. Hünenbergerb", - title = "Computation of methodology hyphen independent ionic solvation - free energies from molecular simulations", - journal = "J. Chem. Phys.", - year = 2006, - note = "Example modified for testing purposes", - pages = "55--65", - - # Additional BibLatex Fields - date = "2006-03-15", - file = "a_file.pdf", - issuetitle = "Semantic {3D} Media and Content", - translator = "Wicksteed, P. H. 
and {The translator factory}", - urldate = "2006-10-01", - pagetotal = 528, - abstract = "The computation of ionic solvation free energies from - atomistic simulations is a surprisingly difficult problem that - has found no satisfactory solution for more than 15 years.", - doi = "10.1063/1.2172593", - isbn = "0-816-52066-6", - issn = "0097-8493", - url = "http://www.ctan.org" - ) - expect_snapshot(toBibtex(bib)) - x <- as_cff(bib) - - - parsed <- as_bibentry(x) - expect_snapshot(toBibtex(parsed)) -}) - - test_that("Test Fallback year", { x <- cff() @@ -502,4 +510,35 @@ test_that("Parser return nulls", { expect_null(make_bibentry(NULL)) }) +test_that("Fallback month", { + bib <- bibentry("Article", + key = "knuth:1984", + author = person("R Core Team"), + title = "Literate Programming", + journal = "The Computer Journal", + year = "1984", + # Optional + volume = "27", + number = 2, + pages = "97--111", + month = "January", + keywords = "Some, simple, keywords" + ) + + expect_identical(clean_str(bib[[1]]$month), "January") + x <- as_cff(bib) + + expect_identical(x[[1]]$month, "1") + + x[[1]]$month <- NULL + x[[1]]$`date-published` <- "2010-12-31" + bib2 <- as_bibentry(x) + + + expect_identical(clean_str(bib2[[1]]$month), "dec") + x2 <- as_cff(bib2) + expect_identical(x2[[1]]$month, "12") +}) + + # Classes ---- diff --git a/vignettes/bibtex_cff.Rmd b/vignettes/bibtex_cff.Rmd index 247e6ed7..74b012cb 100644 --- a/vignettes/bibtex_cff.Rmd +++ b/vignettes/bibtex_cff.Rmd @@ -493,6 +493,8 @@ We provide more detail on some of the mappings presented in the table above: {3--5}** would be parsed as [start: 3]{.underline}, [end: 5]{.underline} in [CFF]{.underline}. +#### BibLaTeX + Additionally, there are other [CFF]{.underline} keys that correspond to **BibLaTeX** fields. We propose to include these fields in the crosswalk[^6], even though they are not part of the core **BibTeX** fields definition. 
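A minimal sketch of the cff ↔ bibentry round trip that the roxygen docs, vignette and tests in this patch describe — assuming the {cffr} functions documented above (`as_cff()`, `as_bibentry()`, `toBibtex()`); the bibliographic values below are illustrative only:

``` r
# Minimal sketch, assuming the {cffr} API documented in this patch
# (as_cff(), as_bibentry(), toBibtex()); the entry itself is made up.
library(cffr)

bib <- bibentry("Article",
  author  = "Jane Doe",
  title   = "An illustrative article",
  journal = "Journal of Examples",
  year    = 2024,
  pages   = "3--5" # stored as start/end in CFF, written back as pages = {3--5}
)

cff_ref <- as_cff(bib)          # bibentry -> cff reference list
bib2    <- as_bibentry(cff_ref) # cff reference -> bibentry again
toBibtex(bib2)                  # print as BibTeX markup
```
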
From 41ab3fcbfb9a4dafe11cd60fe5a3e1f426326de9 Mon Sep 17 00:00:00 2001 From: dieghernan Date: Mon, 4 Mar 2024 15:27:49 +0000 Subject: [PATCH 02/13] Start adding subclasses --- NAMESPACE | 1 + R/as_bibentry.R | 4 ++ R/as_cff.R | 91 ++++++++++++++++++++++++------- R/cff-methods.R | 31 +++++++++++ tests/testthat/test-cff-methods.R | 31 +++++++++++ 5 files changed, 139 insertions(+), 19 deletions(-) diff --git a/NAMESPACE b/NAMESPACE index 259686bd..179bfb12 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -3,6 +3,7 @@ S3method("[",cff_pers_list) S3method("[",cff_ref_list) S3method(as.data.frame,cff) +S3method(as.list,cff) S3method(as.person,cff) S3method(as_cff,Bibtex) S3method(as_cff,bibentry) diff --git a/R/as_bibentry.R b/R/as_bibentry.R index 5dfba8aa..77d27067 100644 --- a/R/as_bibentry.R +++ b/R/as_bibentry.R @@ -146,6 +146,7 @@ as_bibentry <- function(x, } # Cleanup + obj_extract <- as.list(obj_extract) obj_extract <- obj_extract[lengths(obj_extract) > 0] if (length(obj_extract) == 0) { return(NULL) @@ -163,6 +164,9 @@ make_bibentry <- function(x) { return(NULL) } + # Relist to cff for dispatching methods + x <- as_cff(x) + # Partially based on ruby parser # https://github.com/citation-file-format/ruby-cff/blob/main/lib/cff/ >> # (cont) formatter/bibtex_formatter.rb diff --git a/R/as_cff.R b/R/as_cff.R index cfef6fe9..23146616 100644 --- a/R/as_cff.R +++ b/R/as_cff.R @@ -145,33 +145,63 @@ as.cff <- function(x) { # Helper---- -#' Recursively clean lists and assign cff classes -#' to all nested lists +#' Recursively clean lists #' #' #' @noRd -rapply_cff <- function(x) { - if (inherits(x, "cff")) { - return(x) - } - +rapply_drop_null <- function(x) { if (is.list(x) && length(x) > 0) { x <- drop_null(x) - x <- lapply(x, rapply_cff) - return(structure(x, class = c("cff", "list"))) + x <- lapply(x, rapply_drop_null) + return(x) } else { return(x) } } + +rapply_class <- function(x) { + if (is_named(x)) x <- x[!duplicated(names(x))] + + xend <- lapply(x, function(el) { + xelement <- el + guess <- guess_cff_part(xelement) + + if (guess %in% c("unclear", "cff_full")) { + return(xelement) + } + + if (guess == "cff_pers_list") { + xelement <- lapply(xelement, function(j) { + j_in <- j + class(j_in) <- c("cff_pers", "cff", "list") + j_in + }) + class(xelement) <- c("cff_pers_list", "cff", "list") + } + + if (guess == "cff_ref_list") { + xelement <- lapply(xelement, function(j) { + j_in <- rapply_class(j) + class(j_in) <- c("cff_ref", "cff", "list") + j_in + }) + class(xelement) <- c("cff_ref_list", "cff", "list") + } + + if (guess %in% c("cff_ref", "cff_pers")) { + xin <- rapply_class(xelement) + class(xin) <- c(guess, "cff", "list") + xelement <- xin + } + return(xelement) + }) + xend +} + # https://adv-r.hadley.nz/s3.html#s3-constructor # Constructor new_cff <- function(x) { - if (is_cff(x)) { - class(x) <- c("cff", "list") - return(x) - } - # Clean all strings recursively x <- rapply(x, function(x) { @@ -187,13 +217,36 @@ new_cff <- function(x) { x <- drop_null(x) # Remove duplicated names if named - if (!is.null(names(x))) x <- x[!duplicated(names(x))] + if (is_named(x)) x <- x[!duplicated(names(x))] + + # Now apply drop null to nested lists + x <- lapply(x, rapply_drop_null) - # Now apply cff class to nested lists - x <- lapply(x, rapply_cff) + # Reclass nested + guess_x <- guess_cff_part(x) + if (guess_x == "cff_ref_list") { + x2 <- lapply(x, function(j) { + j2 <- rapply_class(j) + class(j2) <- c("cff_ref", "cff", "list") + j2 + }) + class(x2) <- c(guess_x, "cff", "list") + return(x2) + } + + xend 
<- rapply_class(x) + + final_class <- switch(guess_x, + "cff_full" = c("cff", "list"), + "unclear" = c("cff", "list"), + c(guess_x, "cff", "list") + ) + + if (!is.null(final_class)) { + class(xend) <- final_class + } - class(x) <- c("cff", "list") - x + xend } # Just for pretty printing on extract diff --git a/R/cff-methods.R b/R/cff-methods.R index d2c80e05..519a61bf 100644 --- a/R/cff-methods.R +++ b/R/cff-methods.R @@ -34,6 +34,16 @@ as.data.frame.cff <- function(x, row.names = NULL, optional = FALSE, ...) { if (inherits(x, "cff_ref_list")) { x_n <- list("references" = x) the_df <- cff_to_df(x_n) + } else if (inherits(x, "cff_pers_list")) { + n_l <- seq_len(length(x)) + end_df <- lapply(n_l, function(i) { + df <- as.data.frame(x[[i]]) + nm <- names(df) + names(df) <- paste0("person", ".", sprintf("%02d", i - 1), ".", nm) + return(df) + }) + + the_df <- do.call(cbind, end_df) } else if (is.null(names(x))) { the_df <- cff_list_to_df(x) } else { @@ -103,6 +113,27 @@ tail.cff <- function(x, n = 6L, ...) { } +#' List +#' +#' @noRd +#' @export +as.list.cff <- function(x, ...) { + xl <- rapply(x, function(x) { + if (is.list(x) || length(x) > 1) { + return(unclass(x)) + } + return(unclass(x)) + }, + how = "list" + ) + + as.list(xl) +} + + + + + #' @rdname as_bibentry #' @name toBibtex.cff #' @order 2 diff --git a/tests/testthat/test-cff-methods.R b/tests/testthat/test-cff-methods.R index e1423439..208222a7 100644 --- a/tests/testthat/test-cff-methods.R +++ b/tests/testthat/test-cff-methods.R @@ -251,3 +251,34 @@ test_that("toBibtex", { froml <- toBibtex(cff_read_bib_text(string)) expect_equal(sum(names(froml) == "title"), 1) }) + + +test_that("as.list", { + f <- system.file("examples/CITATION_complete.cff", package = "cffr") + + full_cff <- cff_read_cff_citation(f) + + # Capture dput and search + dput_cff <- capture.output(dput(full_cff)) + + ntot_class <- sum(grepl("\"cff\"", dput_cff)) + + expect_gt(ntot_class, 30) + + # Unlist + unl <- as.list(full_cff) + + # Capture dput and search + dput_unl <- capture.output(dput(unl)) + + ntot_class_unl <- sum(grepl("\"cff\"", unl)) + + expect_identical(ntot_class_unl, 0L) + + # Reclass + regen_cff <- as_cff(unl) + + expect_identical(full_cff, regen_cff) + + expect_true(cff_validate(regen_cff, verbose = FALSE)) +}) From 8b243da51dcdc2a5e50522e0605175c0b243864b Mon Sep 17 00:00:00 2001 From: Diego H Date: Mon, 4 Mar 2024 21:20:03 +0100 Subject: [PATCH 03/13] Add more tests --- R/as_bibentry.R | 2 +- R/as_cff_person.R | 117 +++++++++++++++----------------- R/utils.R | 7 +- README.Rmd | 15 ++-- README.md | 69 ++++++++++++++++++- codemeta.json | 4 +- data/cran_to_spdx.rda | Bin 907 -> 916 bytes inst/schemaorg.json | 2 +- man/as_cff_person.Rd | 20 ++++-- tests/testthat/_snaps/as_cff.md | 64 +++++++++++++++++ tests/testthat/test-as_cff.R | 62 ++++++++++++++++- 11 files changed, 279 insertions(+), 83 deletions(-) diff --git a/R/as_bibentry.R b/R/as_bibentry.R index 77d27067..94451ff3 100644 --- a/R/as_bibentry.R +++ b/R/as_bibentry.R @@ -528,7 +528,7 @@ get_bib_month <- function(x) { m <- as.integer(format(as.Date(x$`date-published`), "%m")) } - # Try to parse to 3 month string + # Try to get 3 month string m_int <- suppressWarnings(as.integer(m)) m_letters <- clean_str(tolower(month.abb[m_int])) diff --git a/R/as_cff_person.R b/R/as_cff_person.R index eb930040..c3b4fd07 100644 --- a/R/as_cff_person.R +++ b/R/as_cff_person.R @@ -1,8 +1,8 @@ -#' Create a person with the corresponding [`cff`] structure +#' Create a `person` with the corresponding [`cff`] 
structure #' #' @description #' -#' Create a `person` or `entity` as defined by the +#' Create a list of `person` or `entity` as defined by the #' #' ```{r, echo=FALSE, results='asis'} #' @@ -38,16 +38,18 @@ #' See **Examples**. #' #' @return -#' `as_cff_person()` returns A list of persons or entities with class -#' `cff` converted to the +#' `as_cff_person()` returns a list of persons or entities with class `cff` and +#' subclass `cff_pers_list`, converted to the #' ```{r, echo=FALSE, results='asis'} #' #' cat(paste0(" [Citation File Format schema]", #' "(https://github.com/citation-file-format/", -#' "citation-file-format/blob/main/schema-guide.md).")) +#' "citation-file-format/blob/main/schema-guide.md). ")) #' #' #' ``` +#' Each element of the `cff_pers_list` would have a class `cff` and a subclass +#' `cff_pers`. #' #' @details #' @@ -107,12 +109,20 @@ #' #' cff_person <- as_cff_person(a_person) #' +#' # Class cff and a special subclass +#' class(cff_person) +#' +#' # With each element with other special subclass +#' +#' class(cff_person[[1]]) +#' +#' # Print #' cff_person #' #' # Back to person object with S3 Method #' as.person(cff_person) #' -#' # Parse a string +#' # Coerce a string #' a_str <- paste0( #' "Julio Iglesias ", #' "()" @@ -156,17 +166,8 @@ as_cff_person <- function(person) { if (!length(the_obj) > 0) { return(NULL) } - the_obj <- new_cff(the_obj) - - # Add classes - cff_pers_class <- lapply(the_obj, function(x) { - class(x) <- unique(c("cff_pers", "cff", class(x))) - x - }) - - class(cff_pers_class) <- c("cff_pers_list", "cff", "list") - - cff_pers_class + the_obj <- as_cff(the_obj) + the_obj } create_person_from_r <- function(person) { @@ -210,24 +211,24 @@ create_person_from_r <- function(person) { if (is_entity) { as_bib_text <- paste(c(person$family, person$given), collapse = " ") # And protect it - as_bib_text <- paste0("{", as_bib_text, "}") + as_bib_text <- protect_bib_braces(as_bib_text) } else { # Use von Family, Junior, Given # Protect given giv <- paste0(person$given, collapse = " ") - giv <- paste0("{", giv, "}") + giv <- protect_bib_braces(giv) as_bib_text <- paste0(c(person$family, giv), collapse = ", ") } - parsed_person <- create_person_from_txt(as_bib_text) - parsed_comments <- extract_person_comments(person) + pers_cff <- create_person_from_txt(as_bib_text) + comm_cff <- extract_person_comments(person) # Add comments - parsed_person <- c(parsed_person, parsed_comments) + pers_cff <- c(pers_cff, comm_cff) # Validate fields - parsed_person <- validate_cff_person_fields(parsed_person) - parsed_person + pers_cff <- validate_cff_person_fields(pers_cff) + pers_cff } create_person_from_txt <- function(as_bib_text) { @@ -254,20 +255,20 @@ create_person_from_txt <- function(as_bib_text) { # Fake a person object to extract comments fake_person <- paste0("{Fake} ", comment_only) - parsed_comments <- extract_person_comments(fake_person) + comm_cff <- extract_person_comments(fake_person) } else { # Does not person_only <- as_bib_text - parsed_comments <- list() + comm_cff <- list() } # Special case for Bioconductor if (is_substring(tolower(person_only), "bioconductor")) { - person_only <- paste0("{", person_only, "}") + person_only <- protect_bib_braces(person_only) } # Special case for R Core Team if (is_substring(tolower(person_only), "r core")) { - person_only <- paste0("{", person_only, "}") + person_only <- protect_bib_braces(person_only) } @@ -283,10 +284,9 @@ create_person_from_txt <- function(as_bib_text) { perl = TRUE ) - commas <- 
as.character(lengths(regmatches( - protected, - gregexpr(",", protected) - ))) + commas <- as.character( + lengths(regmatches(protected, gregexpr(",", protected))) + ) # Assign the corresponding fun bibtex_name_str <- switch(commas, @@ -302,10 +302,7 @@ create_person_from_txt <- function(as_bib_text) { # Clean bibtex_name_str <- lapply(bibtex_name_str, function(z) { - if (is.null(z)) { - return(NULL) - } - if (any((is.na(z) | z == ""))) { + if (is.null(clean_str(z))) { return(NULL) } @@ -313,13 +310,13 @@ create_person_from_txt <- function(as_bib_text) { clean_str(cleaned) }) - # Final parsed person + # Final person if (is.null(bibtex_name_str$given)) { ent <- c(bibtex_name_str$von, bibtex_name_str$family, bibtex_name_str$jr) ent <- clean_str(paste(ent, collapse = " ")) - parsed_person <- list(name = ent) + pers_cff <- list(name = ent) } else { - parsed_person <- list( + pers_cff <- list( "family-names" = bibtex_name_str$family, "given-names" = bibtex_name_str$given, "name-particle" = bibtex_name_str$von, @@ -327,14 +324,14 @@ create_person_from_txt <- function(as_bib_text) { ) } - parsed_person <- parsed_person[!lengths(parsed_person) == 0] + pers_cff <- pers_cff[!lengths(pers_cff) == 0] # Add comments - parsed_person <- c(parsed_person, parsed_comments) + pers_cff <- c(pers_cff, comm_cff) # Validate fields - parsed_person <- validate_cff_person_fields(parsed_person) - parsed_person + pers_cff <- validate_cff_person_fields(pers_cff) + pers_cff } guess_hint <- function(person) { @@ -408,10 +405,10 @@ extract_person_comments <- function(person) { person <- as.person(person) # Extract from comments - parsed_comments <- as.list(person$comment) - names(parsed_comments) <- tolower(names(parsed_comments)) - nms_com <- names(parsed_comments) - comment_as_text <- tolower(clean_str(parsed_comments)) + comm_cff <- as.list(person$comment) + names(comm_cff) <- tolower(names(comm_cff)) + nms_com <- names(comm_cff) + comment_as_text <- tolower(clean_str(comm_cff)) # Special case when coerced from text, only can extract orcid and web if (all( @@ -431,40 +428,40 @@ extract_person_comments <- function(person) { web <- url_comment[!grepl("orcid.org/", url_comment)][1] # Reset comment list - parsed_comments <- list() + comm_cff <- list() - parsed_comments$orcid <- clean_str(orcid) - parsed_comments$website <- clean_str(web) + comm_cff$orcid <- clean_str(orcid) + comm_cff$website <- clean_str(web) } # Add url to orcid if not present - # Parse leading invalid urls + # Get leading invalid urls - if (!is.null(parsed_comments$orcid)) { - orcid <- gsub("^orcid.org/", "", parsed_comments$orcid) + if (!is.null(comm_cff$orcid)) { + orcid <- gsub("^orcid.org/", "", comm_cff$orcid) orcid <- gsub("^https://orcid.org/", "", orcid) orcid <- gsub("^http://orcid.org/", "", orcid) - parsed_comments$orcid <- paste0("https://orcid.org/", orcid) + comm_cff$orcid <- paste0("https://orcid.org/", orcid) } # Add website - web <- parsed_comments$website + web <- comm_cff$website if (!is.null(web)) { - parsed_comments$website <- clean_str(web[is_url(web)]) + comm_cff$website <- clean_str(web[is_url(web)]) } # Add also email # Check if several mails (MomTrunc 6.0) - look_emails <- c(unlist(person$email), parsed_comments$email) + look_emails <- c(unlist(person$email), comm_cff$email) valid_emails <- unlist(lapply(look_emails, is_email)) email <- look_emails[valid_emails][1] # Final list fin_list <- c( list(email = NULL), - parsed_comments["email" != names(parsed_comments)] + comm_cff["email" != names(comm_cff)] ) fin_list$email <- 
clean_str(email) @@ -474,7 +471,3 @@ extract_person_comments <- function(person) { protect_bib_braces <- function(x) { paste0("{", x, "}") } - -cff_person_to_string <- function(x) { - -} diff --git a/R/utils.R b/R/utils.R index 1125fcf4..298165c1 100644 --- a/R/utils.R +++ b/R/utils.R @@ -116,8 +116,11 @@ detect_repos <- function(repos = getOption("repos")) { fuzzy_keys <- function(keys) { nm <- names(keys) names(keys) <- gsub("_", "-", nm, fixed = TRUE) - valid_keys <- cff_schema_keys() - + valid_keys <- unique(c( + cff_schema_keys(), cff_schema_definitions_entity(), + cff_schema_definitions_person(), + cff_schema_definitions_refs() + )) names <- names(keys) # Check valid keys as is is_valid_key <- names %in% valid_keys diff --git a/README.Rmd b/README.Rmd index 09167f48..71cccf60 100644 --- a/README.Rmd +++ b/README.Rmd @@ -15,7 +15,8 @@ knitr::opts_chunk$set( ) ``` -# cffr cffr website +# cffr cffr website + [![CRAN-status](https://www.r-pkg.org/badges/version/cffr)](https://CRAN.R-project.org/package=cffr) @@ -90,7 +91,10 @@ token <- token[!token %in% c(NA, NULL, "")][1] ghtoken <- paste("token", token) tmpfile <- tempfile(fileext = ".json") # Get numbers of repos -api_url <- "https://api.github.com/search/code?q=cffr+extension:cff+filename:CITATION" +api_url <- paste0( + "https://api.github.com/search/code?q=cffr+extension:", + "cff+filename:CITATION" +) res <- tryCatch( download.file(api_url, tmpfile, @@ -114,9 +118,10 @@ if (isTRUE(res)) { } else { nreps <- as.integer(jsonlite::read_json(tmpfile)$total_count) cat(paste0( - "As per ", today, " there are at least ", nreps, " repos on GitHub using **cffr**. ", - "[Check them out here]", - "(https://github.com/search?q=cffr%20path%3A**%2FCITATION.cff&type=code)." + "As per ", today, " there are at least ", nreps, + " repos on GitHub using **cffr**. ", "[Check them out here]", + "(https://github.com/search?q=cffr%20path%3A**%2FCITATION.", + "cff&type=code)." )) } cat("\n") diff --git a/README.md b/README.md index 530ec139..954bed00 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ -# cffr cffr website +# cffr cffr website @@ -72,7 +72,7 @@ file and the `CITATION` file (if present) of your package. Note that **cffr** works best if your package pass `R CMD check/devtools::check()`. -As per 2024-03-04 there are at least 302 repos on GitHub using **cffr**. +As per 2024-03-04 there are at least 298 repos on GitHub using **cffr**. [Check them out here](https://github.com/search?q=cffr%20path%3A**%2FCITATION.cff&type=code). @@ -561,6 +561,26 @@ test <- cff_create("rmarkdown") - family-names: Chirico. 
given-names: Michael year: '2024' + - type: software + title: dygraphs + abstract: 'dygraphs: Interface to ''Dygraphs'' Interactive Time Series Charting + Library' + notes: Suggests + url: https://github.com/rstudio/dygraphs + repository: https://CRAN.R-project.org/package=dygraphs + authors: + - family-names: Vanderkam + given-names: Dan + website: http://dygraphs.com/ + - family-names: Allaire + given-names: JJ + - family-names: Owen + given-names: Jonathan + - family-names: Gromer + given-names: Daniel + - family-names: Thieurmel + given-names: Benoit + year: '2024' - type: software title: fs abstract: 'fs: Cross-Platform File System Operations Based on ''libuv''' @@ -577,6 +597,26 @@ test <- cff_create("rmarkdown") given-names: Gábor email: csardi.gabor@gmail.com year: '2024' + - type: software + title: rsconnect + abstract: 'rsconnect: Deploy Docs, Apps, and APIs to ''Posit Connect'', ''shinyapps.io'', + and ''RPubs''' + notes: Suggests + url: https://rstudio.github.io/rsconnect/ + repository: https://CRAN.R-project.org/package=rsconnect + authors: + - family-names: Atkins + given-names: Aron + email: aron@posit.co + - family-names: Allen + given-names: Toph + - family-names: Wickham + given-names: Hadley + - family-names: McPherson + given-names: Jonathan + - family-names: Allaire + given-names: JJ + year: '2024' - type: software title: downlit abstract: 'downlit: Syntax Highlighting and Automatic Linking' @@ -589,6 +629,19 @@ test <- cff_create("rmarkdown") email: hadley@posit.co year: '2024' version: '>= 0.4.0' + - type: software + title: katex + abstract: 'katex: Rendering Math to HTML, ''MathML'', or R-Documentation Format' + notes: Suggests + url: https://docs.ropensci.org/katex/ + repository: https://CRAN.R-project.org/package=katex + authors: + - family-names: Ooms + given-names: Jeroen + email: jeroen@berkeley.edu + orcid: https://orcid.org/0000-0002-4035-0289 + year: '2024' + version: '>= 1.4.0' - type: software title: sass abstract: 'sass: Syntactically Awesome Style Sheets (''Sass'')' @@ -699,6 +752,18 @@ test <- cff_create("rmarkdown") given-names: Davis email: davis@posit.co year: '2024' + - type: software + title: cleanrmd + abstract: 'cleanrmd: Clean Class-Less ''R Markdown'' HTML Documents' + notes: Suggests + url: https://pkg.garrickadenbuie.com/cleanrmd/ + repository: https://CRAN.R-project.org/package=cleanrmd + authors: + - family-names: Aden-Buie + given-names: Garrick + email: garrick@adenbuie.com + orcid: https://orcid.org/0000-0002-7111-0077 + year: '2024' - type: software title: withr abstract: 'withr: Run Code ''With'' Temporarily Modified Global State' diff --git a/codemeta.json b/codemeta.json index ba3907f6..a771f6dd 100644 --- a/codemeta.json +++ b/codemeta.json @@ -14,7 +14,7 @@ "name": "R", "url": "https://r-project.org" }, - "runtimePlatform": "R version 4.3.2 (2023-10-31)", + "runtimePlatform": "R version 4.3.2 (2023-10-31 ucrt)", "provider": { "@id": "https://cran.r-project.org", "@type": "Organization", @@ -200,7 +200,7 @@ }, "isPartOf": "https://ropensci.org", "keywords": ["attribution", "citation", "credit", "citation-files", "cff", "metadata", "r", "r-package", "citation-file-format", "rstats", "ropensci", "cran"], - "fileSize": "947.426KB", + "fileSize": "975.452KB", "citation": [ { "@type": "ScholarlyArticle", diff --git a/data/cran_to_spdx.rda b/data/cran_to_spdx.rda index 33fae2994ac0070f09336d3d8c403ec099798a1f..0bd1435cae708e762b962cb7a97ec9ae0c12fef9 100644 GIT binary patch literal 916 
zcmV;F18e*riwFP!000002F+JXPuoBccH=-unm|gSm3lH&4Vna`W6qYoKWTU7l z6%~ibghjRFILLPBCXZ^{hliR@wmg?9cUwC}dynSrJ>vGR3u>%N zH7^9s2jmCN)MFU+jvqQf=!^lyHV7uMxN$r@(R46&?O7nkAL|Ucu8wsheU#`}%-}Kg zct}p6f|sD3J$Vg6?f4@&g<#mx_GkbE&SM;}8efM{{dWsb-+YHfc*I(skDzQMQuc9 z1!Aohx0C}Wz@61{kAy z&Yir|26bnZ!6Z%^)E>i>qx;N-Fqt-QvgfmsNZIvBwAm0ab2)67k|eIP;A`!G#>mpM z^iZ=B9+QksbB1dyET}lE|DM>xOF{e0ntpR!*DkoCa;-+&L=l6|gqH=3{IpT6Ecq3S zYeuoij3tXyK2yb3u8OVv0aVXC*O@pWx)Nr+Lh>6AcYHg*)N{XbpuvMrDCh&%#!Z-b zl8jZht*40o7ZBOVmSVVL106tdg;alC4R?J;u`wz#?3 zjfZj`(dQt-@SQK_UFh<3UL@mGl-u{nq~C0?X$Sm-txHN&hBcFxl`GI@LE+XjzF=3V z{08ZT_c^er!9v~w*h7PCW`0WWB0K*ZUKH0qz)3MkD`ZhnY^`QN@PAw(38+v}F}WMf z%vENY4z3n27R)i3_&a+GMMZe~UGnS+3epi+%Hr$8?&+{g{tFEUC+Ffm{5BSleFUss q*`d9A?PHSyz3NXt?~+F~qDOtlUw3zRKk(1b=<^FCJhi~74*&oy4!s@# literal 907 zcmV;619bdCT4*^jL0KkKS>9zRc>o3)f6)K`|B!TNe+NIOUueH)-|#>H0ssI3&;$PH zMJcLQXdxn$LqY0#n*AnjWXMA%O-!8VroodWVq>8hVWZpyrJl8ess_0%@QOhK3*w7>x`OkO2~jFe$2d zXwawXnjVu)lzN&O4WcqVOllbqQxUPLFe>dz=&r?fZ9@1WKjajLMimfmJ2mUYPPPXx zj*Ybg1&%$?qNxPUhn5dz<$F-rN4=W12TeQiUlnt8<0b6Yj7qp-b#>kRV}V z`Fj%x=Ccapm4j`8K(v)wkqAVgQwwED3edK<)|qCdvuV!(PmL8wD1-^BC2K`h&j3QP zL0u36=!%xXc0(x;6=KLKJiY@Gylf7R8cZa*UN&lz^7{|Bd%yVmS_cO2<5+P%UUOvX zSz@tbnf*NMPJ>Nh!Uq@-^UcwV*1cylE@5sg3T+_5-MXcG?wWWD;2vA3o z&8+4_l-WR}LCwk=!sx_DBpAAMWvtJbNN+NfJb=(2mY@It diff --git a/inst/schemaorg.json b/inst/schemaorg.json index 015b552c..ce1c62ba 100644 --- a/inst/schemaorg.json +++ b/inst/schemaorg.json @@ -26,6 +26,6 @@ "name": "Comprehensive R Archive Network (CRAN)", "url": "https://cran.r-project.org" }, - "runtimePlatform": "R version 4.3.2 (2023-10-31)", + "runtimePlatform": "R version 4.3.2 (2023-10-31 ucrt)", "version": "0.99.0.9000" } diff --git a/man/as_cff_person.Rd b/man/as_cff_person.Rd index 6b11aa1f..3aaa09f7 100644 --- a/man/as_cff_person.Rd +++ b/man/as_cff_person.Rd @@ -3,7 +3,7 @@ \name{as_cff_person} \alias{as_cff_person} \alias{as.person.cff} -\title{Create a person with the corresponding \code{\link{cff}} structure} +\title{Create a \code{person} with the corresponding \code{\link{cff}} structure} \usage{ as_cff_person(person) @@ -20,13 +20,15 @@ See \strong{Examples}. \item{x}{\code{cff} object representing a person or entity.} } \value{ -\code{as_cff_person()} returns A list of persons or entities with class -\code{cff} converted to the \href{https://github.com/citation-file-format/citation-file-format/blob/main/schema-guide.md}{Citation File Format schema}. +\code{as_cff_person()} returns a list of persons or entities with class \code{cff} and +subclass \code{cff_pers_list}, converted to the \href{https://github.com/citation-file-format/citation-file-format/blob/main/schema-guide.md}{Citation File Format schema}. +Each element of the \code{cff_pers_list} would have a class \code{cff} and a subclass +\code{cff_pers}. \code{as.person.cff()} returns a \code{person} object. } \description{ -Create a \code{person} or \code{entity} as defined by the +Create a list of \code{person} or \code{entity} as defined by the \href{https://github.com/citation-file-format/citation-file-format/blob/main/schema-guide.md}{Citation File Format schema}. 
\code{\link[=as_cff_person]{as_cff_person()}} can convert the following objects: @@ -96,12 +98,20 @@ a_person cff_person <- as_cff_person(a_person) +# Class cff and a special subclass +class(cff_person) + +# With each element with other special subclass + +class(cff_person[[1]]) + +# Print cff_person # Back to person object with S3 Method as.person(cff_person) -# Parse a string +# Coerce a string a_str <- paste0( "Julio Iglesias ", "()" diff --git a/tests/testthat/_snaps/as_cff.md b/tests/testthat/_snaps/as_cff.md index 9c131a6b..3b42a6aa 100644 --- a/tests/testthat/_snaps/as_cff.md +++ b/tests/testthat/_snaps/as_cff.md @@ -18,6 +18,18 @@ affiliation: Real Madrid website: https://www.google.com/ +--- + + Code + single_cff + Output + family-names: person + given-names: A + email: fake@gmail.com + orcid: https://orcid.org/0000-0000-0000-0000 + affiliation: Real Madrid + website: https://www.google.com/ + # as_cff.bibentry, toBibtex Code @@ -30,6 +42,18 @@ editors: - name: Editor +--- + + Code + b_single_cff + Output + type: generic + title: title + authors: + - name: Author + editors: + - name: Editor + # as_cff.default Code @@ -37,3 +61,43 @@ Output a: '1' +# Other convertes + + Code + noadd <- cff(chocolate = "New York", version = 5) + Message + i Found misspelled keys. Trying to map: + x chocolate: No match, removing. + +# Reading full cff + + Code + df + Output + class + authors cff_pers_list|cff|list + contact cff_pers_list|cff|list + identifiers cff_ref_list|cff|list + references cff_ref_list|cff|list + preferred-citation cff_ref|cff|list + +--- + + Code + df2 + Output + class + authors cff_pers_list|cff|list + contact cff_pers_list|cff|list + editors cff_pers_list|cff|list + editors-series cff_pers_list|cff|list + recipients cff_pers_list|cff|list + senders cff_pers_list|cff|list + translators cff_pers_list|cff|list + conference cff_pers|cff|list + database-provider cff_pers|cff|list + institution cff_pers|cff|list + location cff_pers|cff|list + publisher cff_pers|cff|list + identifiers cff_ref_list|cff|list + diff --git a/tests/testthat/test-as_cff.R b/tests/testthat/test-as_cff.R index b31c7359..7edfd5e0 100644 --- a/tests/testthat/test-as_cff.R +++ b/tests/testthat/test-as_cff.R @@ -6,7 +6,7 @@ test_that("as.cff still works", { expect_silent(l1 <- as_cff(l)) expect_silent(l2 <- as.cff(l)) - expect_s3_class(l1, c("cff", "list")) + expect_s3_class(l1, c("cff", "list"), exact = TRUE) expect_snapshot(l2) }) @@ -22,8 +22,18 @@ test_that("as_cff.person", { ) ) - expect_identical(as_cff(pers), as_cff_person(pers)) + expect_silent(aa <- as_cff_person(pers)) + expect_s3_class(aa, c("cff_pers_list", "cff", "list"), exact = TRUE) + expect_s3_class(aa[[1]], c("cff_pers", "cff", "list"), exact = TRUE) + expect_identical(aa, as_cff_person(pers)) expect_snapshot(as_cff(pers)) + + # Check a single person + single <- as.list(aa)[[1]] + expect_false(inherits(single, "cff")) + single_cff <- as_cff(single) + expect_s3_class(single_cff, c("cff_pers", "cff", "list"), exact = TRUE) + expect_snapshot(single_cff) }) test_that("as_cff.bibentry, toBibtex", { @@ -44,6 +54,17 @@ test_that("as_cff.bibentry, toBibtex", { bbbb <- as_cff(b_bib) expect_identical(bbb, bbbb) + + # Check single + b_single <- as.list(bbb)[[1]] + expect_false(inherits(b_single, "cff")) + b_single_cff <- as_cff(b_single) + expect_s3_class(b_single_cff, c("cff_ref", "cff", "list"), exact = TRUE) + expect_snapshot(b_single_cff) + + # Check empty + s <- bibentry("misc") + expect_null(as_cff(s)) }) @@ -62,7 +83,7 @@ test_that("Other 
convertes", { expect_true(is_cff(a)) expect_s3_class(a, "cff") - expect_message(noadd <- cff(address = "New York", version = 5)) + expect_snapshot(noadd <- cff(chocolate = "New York", version = 5)) expect_true(is_cff(noadd)) expect_false(is_cff(list(a = 1, b = 2))) expect_true(is_cff(as_cff(list(a = 1, b = 2)))) @@ -105,3 +126,38 @@ test_that("]] cff_pers", { expect_length(b2_reg, 1) expect_s3_class(b2_reg, c("cff_pers", "cff", "list"), exact = TRUE) }) + +# Check full classes with recursion +test_that("Reading full cff", { + full <- system.file("examples/CITATION_complete.cff", package = "cffr") + cff_complete <- cff_read(full) + + nm <- names(cff_complete) + + class_v <- vapply(nm, function(x) { + clean_str(paste0(class(cff_complete[[x]]), collapse = "|")) + }, character(1)) + + df <- data.frame(class = sort(class_v[class_v != "character"])) + + expect_snapshot(df) + + # Assess preferred + pref <- cff_complete$`preferred-citation` + nm2 <- names(pref) + + class_v2 <- vapply(nm2, function(x) { + clean_str(paste0(class(pref[[x]]), collapse = "|")) + }, character(1)) + + df2 <- data.frame(class = sort(class_v2[class_v2 != "character"])) + + + expect_snapshot(df2) + + # Relist + asl <- as.list(cff_complete) + + expect_false(inherits(asl, "cff")) + expect_identical(cff_complete, as_cff(asl)) +}) From 53b1918318b602660e75eecf5b47069e4149b31b Mon Sep 17 00:00:00 2001 From: dieghernan Date: Tue, 5 Mar 2024 11:02:25 +0000 Subject: [PATCH 04/13] Add more methods --- NAMESPACE | 6 + NEWS.md | 50 ++- R/as_bibentry.R | 300 ++---------------- R/as_cff.R | 28 +- R/as_cff_person.R | 183 ++--------- R/cff.R | 2 +- R/docs.R | 2 +- R/{cff-methods.R => methods.R} | 145 +++++---- R/utils-bib.R | 250 +++++++++++++++ R/utils-persons.R | 134 ++++++++ inst/WORDLIST | 6 - man/as_bibentry.Rd | 69 ++-- man/as_cff.Rd | 2 +- man/as_cff_person.Rd | 73 +++-- man/cff.Rd | 2 +- man/{cff-class.Rd => cff_class.Rd} | 9 +- man/cff_read_bib_text.Rd | 2 +- man/chunks/value.Rmd | 2 +- man/deprecated_cff_to_bib.Rd | 2 +- pkgdown/_pkgdown.yml | 2 +- tests/testthat/_snaps/as_cff.md | 40 +-- tests/testthat/_snaps/cff_read_bib_text.md | 2 +- .../_snaps/{cff-methods.md => methods.md} | 45 +++ tests/testthat/test-as_bibentry.R | 3 - tests/testthat/test-as_cff.R | 22 +- tests/testthat/test-as_cff_reference.R | 2 +- tests/testthat/test-cff_create.R | 8 +- tests/testthat/test-cff_read.R | 22 +- .../{test-cff-methods.R => test-methods.R} | 36 ++- 29 files changed, 818 insertions(+), 631 deletions(-) rename R/{cff-methods.R => methods.R} (58%) create mode 100644 R/utils-bib.R rename man/{cff-class.Rd => cff_class.Rd} (97%) rename tests/testthat/_snaps/{cff-methods.md => methods.md} (98%) rename tests/testthat/{test-cff-methods.R => test-methods.R} (88%) diff --git a/NAMESPACE b/NAMESPACE index 179bfb12..13b848fd 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -5,6 +5,10 @@ S3method("[",cff_ref_list) S3method(as.data.frame,cff) S3method(as.list,cff) S3method(as.person,cff) +S3method(as.person,cff_pers) +S3method(as.person,cff_pers_list) +S3method(as.person,cff_ref) +S3method(as.person,cff_ref_list) S3method(as_cff,Bibtex) S3method(as_cff,bibentry) S3method(as_cff,default) @@ -15,6 +19,8 @@ S3method(head,cff) S3method(print,cff) S3method(tail,cff) S3method(toBibtex,cff) +S3method(toBibtex,cff_pers) +S3method(toBibtex,cff_pers_list) export(as.cff) export(as_bibentry) export(as_cff) diff --git a/NEWS.md b/NEWS.md index 1b4080bc..c42f1cf8 100644 --- a/NEWS.md +++ b/NEWS.md @@ -6,6 +6,46 @@ to non-core functions**, hence the natural workflow 
(`cff_create()` → ## Major changes +### Classes and methods + +Now **cffr** implements a new class system for +[`definitions.reference`](https://github.com/citation-file-format/citation-file-format/blob/main/schema-guide.md#definitionsreference), +[`definitions.person`](https://github.com/citation-file-format/citation-file-format/blob/main/schema-guide.md#definitionsperson) +and +[`definitions.entity`](https://github.com/citation-file-format/citation-file-format/blob/main/schema-guide.md#definitionsentity) +objects: + +- List of `definitions.reference` (e.g, `references)` has class + `cff_ref_list, cff` and individual elements (e.g `preferred-citation` or + each member of `references`) has class `cff_ref, cff`. +- List of `definitions.person` or `definitions.entity` (e.g. `authors`, + `contact`) has class `cff_pers_list, cff` and individual elements (e.g + `publisher` or each member of `authors`) has class `cff_pers, cff`. + +This change allow to write specific [S3 +Methods](https://adv-r.hadley.nz/s3.html) and extend the capabilities of the +package. + +- New `as_cff()` S3 generic method (replacing `as.cff()`): This method coerces + **R** objects to `cff` class format. Current methods provided are: + - `as_cff.Bibtex()`. + - `as_cff.bibentry()`, replacing cff_parse_citation(). + - `as_cff.person()`, similar to `as_cff_person()` but only for `person` + objects. We recommend using `as_cff_person()` since it can parse also + string representing authors in BibTeX markup (`"{von Neumen}, James"`), + that can't be captured properly via methods. +- The following **base** and **utils** methods supports now `cff` class: + (TODO) + - `as.data.frame.cff()`. + - `as.person.cff()`, that provides results **only** for CFF keys defined + as + [person](https://github.com/citation-file-format/citation-file-format/blob/main/schema-guide.md#definitionsperson) + or + [entity](https://github.com/citation-file-format/citation-file-format/blob/main/schema-guide.md#definitionsentity) + (e.g. `authors`, `contacts`, `editors`, `publisher,` etc.). + - `head.cff()`, `tail.cff()`. + - `toBibtex.cff()`. + ### API The API has been completely reviewed to provide more clarity on functions naming @@ -33,14 +73,6 @@ would warn when used, providing advice on the replacement function. ### New capabilities -- New `as_cff()` S3 generic method (replacing `as.cff()`): This method coerces - **R** objects to `cff-class` format. Current methods provided are: - - `as_cff.Bibtex()`. - - `as_cff.bibentry()`, replacing cff_parse_citation(). - - `as_cff.person()`, similar to `as_cff_person()` but only for `person` - objects. We recommend using `as_cff_person()` since it can parse also - string representing authors in BibTeX markup (`"{von Neumen}, James"`), - that can't be captured properly via methods. - Now reading from external files is performed exclusively by `cff_read()` (that is designed to fit all supported file types on a single entry point) and the new specific readers (that are used under the hood by `cff_read()`), @@ -54,8 +86,6 @@ would warn when used, providing advice on the replacement function. ## Other changes - Minimum **R** version required now is **4.0.0**. -- Now `class()` of `cff` objects are `c("cff", "list")` instead of single - value (`"cff"`). 
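A minimal sketch of the new subclass behaviour (an illustration only, assuming the in-development API described under "Classes and methods" above; the example names are hypothetical and not taken from the package):

``` r
library(cffr)

# person() is provided by utils, which is attached by default
persons <- as_cff_person(c(person("Lois", "Lane"), person("Clark", "Kent")))

class(persons)      # expected per the list above: "cff_pers_list" "cff"
class(persons[[1]]) # expected per the list above: "cff_pers" "cff"

# The subclasses enable S3 dispatch, e.g. back to utils::person or to BibTeX
as.person(persons)
toBibtex(persons)
```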
- New S3 **base** and **utils** methods added: - `as.data.frame.cff().` - `as.person.cff()`, that provides results **only** for CFF keys defined diff --git a/R/as_bibentry.R b/R/as_bibentry.R index 94451ff3..4ebab176 100644 --- a/R/as_bibentry.R +++ b/R/as_bibentry.R @@ -6,20 +6,19 @@ #' @description #' -#' This function creates `bibentry` objects (see [utils::bibentry()]) from -#' different metadata sources ([`cff`] objects, `DESCRIPTION` files, etc.). -#' Note that a **R** `bibentry` object is the representation of a BibTeX entry, -#' see **Examples** +#' This function creates [`bibentry`][utils::bibentry()] objects from different +#' metadata sources ([`cff`] objects, `DESCRIPTION` files, etc.). #' -#' The function tries to map the information of the source `x` into a [`cff`] -#' object and performs a mapping of the metadata to BibTeX, according to -#' `vignette("bibtex_cff", "cffr")`. #' #' The inverse transformation (`bibentry` object to [`cff`] reference) can -#' be done with the corresponding [as_cff()] method. +#' be done with the corresponding [as_cff.bibentry()] method. #' #' @seealso -#' [utils::bibentry()] +#' [utils::bibentry()] to understand more about the `bibentry` class. +#' +#' `vignette("bibtex_cff", "cffr")` provides detailed information about the +#' internal mapping performed between `cff` objects and BibTeX markup ( +#' both `cff` to BibTeX and BibTeX to `cff`). #' #' @references #' - Patashnik, Oren. "BIBTEXTING" February 1988. @@ -35,7 +34,7 @@ #' #' @param x The source that would be used for generating #' the [bibentry()] object via \CRANpkg{cffr}. It could be: -#' * A missing value. That would retrieve the DESCRIPTION +#' * A missing value. That would retrieve the `DESCRIPTION` #' file on your in-development package. #' * An existing `cff` object created with [cff()], [cff_create()] or #' [as_cff()]. @@ -52,6 +51,19 @@ #' @family bibtex #' @family coercing #' +#' +#' @details +#' +#' A **R** [`bibentry`][utils::bibentry()] object is the representation of a +#' BibTeX entry. These objects can be converted to BibTeX markup with +#' [utils::toBibtex()], that creates an object of class `Bibtex` and can be +#' printed and exported as a valid BibTeX entry. +#' +#' +#' `as_bibtex()` tries to map the information of the source `x` into a [`cff`] +#' object and performs a mapping of the metadata to BibTeX, according to +#' `vignette("bibtex_cff", "cffr")`. +#' #' @return #' `as_bibentry()` returns a `bibentry` object (or a list of `bibentry` #' objects). 
@@ -60,7 +72,7 @@ #' #' @examples #' \donttest{ -#' # From a cff object +#' # From a cff object ---- #' cff_object <- cff() #' #' cff_object @@ -73,29 +85,26 @@ #' bib #' #' # Print as bibtex -#' #' toBibtex(bib) #' #' # Thanks to the S3 Method we can also do -#' #' toBibtex(cff_object) #' -#' # From a CITATION.cff file with options +#' # Other sources ---- +#' # From a CITATION.cff #' #' path <- system.file("examples/CITATION_complete.cff", package = "cffr") -#' cff_file <- as_bibentry(path, what = "all") -#' -#' toBibtex(cff_file) +#' cff_file <- as_bibentry(path) #' -#' # For an installed package +#' cff_file #' -#' installed_package <- as_bibentry("jsonvalidate") +#' # For an installed package with options +#' installed_package <- as_bibentry("jsonvalidate", what = "all") #' -#' toBibtex(installed_package) +#' installed_package #' #' #' # Use a DESCRIPTION file -#' #' path2 <- system.file("examples/DESCRIPTION_gitlab", package = "cffr") #' desc_file <- as_bibentry(path2) #' @@ -164,7 +173,7 @@ make_bibentry <- function(x) { return(NULL) } - # Relist to cff for dispatching methods + # Relist to cff for dispatching methods on persons x <- as_cff(x) # Partially based on ruby parser @@ -307,249 +316,4 @@ make_bibentry <- function(x) { return(bib) } -# Helpers to extract info ---- -get_bib_howpublised <- function(x) { - howpublished <- x$medium - - if (!is.null(howpublished)) { - f <- toupper(substr(howpublished, 0, 1)) - rest <- substr(howpublished, 2, nchar(howpublished)) - howpublished <- paste0(c(f, rest), collapse = "") - } - - clean_str(howpublished) -} - -get_bib_note <- function(x) { - note <- x$notes - - # unpublished needs a note - if (all(is.null(note), tolower(x$type) == "unpublished")) { - note <- "Extracted with cffr R package" - } - - clean_str(note) -} - -guess_bibtype <- function(x) { - init_guess <- switch(tolower(x$type), - "article" = "article", - "book" = "book", - "manual" = "manual", - "unpublished" = "unpublished", - "conference" = "inproceedings", - "conference-paper" = "inproceedings", - "proceedings" = "proceedings", - "magazine-article" = "article", - "newspaper-article" = "article", - "pamphlet" = "booklet", - "report" = "techreport", - "thesis" = "mastersthesis", - # We would need to guess - "misc" - ) - - # Try guess Inbook ---- - # inbook is a book where chapter or pages are present - has_chapter <- !is.null(clean_str(x$section)) - has_pages <- !is.null( - clean_str(paste(unique(c(x$start, x$end)), collapse = "--")) - ) - - if (all(init_guess == "book", any(has_chapter, has_pages))) { - init_guess <- "inbook" - return(init_guess) - } - - # Try guess Phdthesis ---- - if (init_guess == "mastersthesis") { - ttype <- clean_str(gsub("[[:punct:]]", "", x$`thesis-type`, perl = TRUE)) - # phd - if (all(!is.null(ttype), grepl("phd", ttype, ignore.case = TRUE))) { - return("phdthesis") - } - } - - # Try guess InCollection ---- - # Hint: is misc with collection-title and publisher - if (init_guess == "misc") { - if (!is.null(clean_str(x$`collection-title`))) { - return("incollection") - } - } - - init_guess -} - -get_bib_address <- function(x) { - # BibTeX 'address' is taken from the publisher (book, others) or the - # conference (inproceedings). 
- # Set logic: conference > institution > publisher - if (!is.null(x$conference)) { - addr_search <- x$conference - } else if (!is.null(x$institution)) { - addr_search <- x$institution - } else { - addr_search <- x$publisher - } - - address <- clean_str( - paste(c( - addr_search$address, addr_search$city, addr_search$region, - addr_search$country - ), collapse = ", ") - ) - - # As a fallback, use also location - if (is.null(address) && !is.null(x$location)) { - address <- clean_str(x$location$name) - } - - address -} - -get_bib_booktitle <- function(x, bibtype) { - # This map collection title. - # If inproceedings, incollection to booktitle - # rest of cases to series - - book_series <- list() - tag_value <- clean_str(x[["collection-title"]]) - - - if (!bibtype %in% c("incollection", "inproceedings")) { - book_series$series <- tag_value - } else { - # Only for incollections and inproceedings map booktitle - book_series$booktitle <- tag_value - - # Fallback to conference name for inproceedings - if (all(bibtype == "inproceedings", is.null(tag_value))) { - book_series$booktitle <- clean_str(x$conference$name) - } - } - book_series -} - -get_bib_inst_org <- function(x, bibtype) { - # For inproceedings, proceedings and manual this field - # is organization - # For thesis it should be school - - inst_org <- list() - # Just name - inst_name <- clean_str(x$institution$name) - - if (bibtype %in% c("inproceedings", "proceedings", "manual")) { - inst_org$organization <- inst_name - } else if (grepl("thesis", bibtype, ignore.case = TRUE)) { - inst_org$school <- inst_name - } else { - inst_org$institution <- inst_name - } - - # Fallback for techreport, search on affiliation first author - - if (bibtype == "techreport" && is.null(inst_org$institution)) { - inst_org$institution <- clean_str(x$authors[[1]]$affiliation) - } - - inst_org -} - -make_bibkey <- function(tobibentry) { - # Be kind and provided a bibentry key - - y <- tobibentry$year - - - # Init etall - etall <- NULL - - - # Also Some entries don't have authors, but editors - # Others may have none (misc, pamphlet) - - init_aut <- tobibentry$author - - # Try get editor in null - if (is.null(init_aut)) { - init_aut <- tobibentry$editor - } - - # If none then get a key based in title - if (any(is.null(init_aut), length(init_aut) == 0)) { - r <- tolower(tobibentry$title) - # Reduce lenght to 15 max - r <- substr(r, 0, 15) - } else { - # First name/surname and et_all if additional authors - # Bear in mind institutions has only given - - nauths <- length(init_aut) - if (nauths > 1) etall <- "_etall" - - - # Get info of first author - unz <- unlist(init_aut[1]) - if ("family" %in% names(unz)) { - r <- unz["family"] - } else { - r <- unz["given"] - } - - r <- tolower(paste0(r, collapse = "")) - } - - # Try hard to remove accents - # First with iconv - r <- iconv(r, from = "UTF-8", to = "ASCII//TRANSLIT", sub = "?") - - # Next to latex - r <- encoded_utf_to_latex(r) - - # Finally keep only a-z letters for key - r <- gsub("[^_a-z]", "", r) - - # Append etall and year - key <- paste0(c(r, etall), collapse = "") - key <- paste(c(key, y), collapse = ":") - key -} - - -get_bib_month <- function(x) { - m <- x$month - - # Fallback - - if (is.null(m) && !is.null(x$`date-published`)) { - # Should be YYYY-MM-DD to be valid on cff, so - m <- as.integer(format(as.Date(x$`date-published`), "%m")) - } - - # Try to get 3 month string - m_int <- suppressWarnings(as.integer(m)) - m_letters <- clean_str(tolower(month.abb[m_int])) - - if (!is.null(m_letters)) { - month 
<- m_letters - } else { - month <- clean_str(m) - } - - month -} - -get_bib_year <- function(x) { - year <- x$year - - # Fallback - - if (is.null(year) && !is.null(x$`date-released`)) { - # Should be YYYY-MM-DD to be valid on cff, so - year <- substr(x$`date-released`, 1, 4) - } - - clean_str(year) -} +# Utils in utils-bib.R diff --git a/R/as_cff.R b/R/as_cff.R index 23146616..820617bf 100644 --- a/R/as_cff.R +++ b/R/as_cff.R @@ -108,11 +108,11 @@ as_cff.bibentry <- function(x, ...) { # Add clases cff_refs_class <- lapply(cff_refs, function(x) { - class(x) <- unique(c("cff_ref", "cff", class(x))) + class(x) <- c("cff_ref", "cff") x }) - class(cff_refs_class) <- c("cff_ref_list", "cff", "list") + class(cff_refs_class) <- c("cff_ref_list", "cff") cff_refs_class } @@ -126,11 +126,11 @@ as_cff.Bibtex <- function(x, ...) { # Add clases cff_refs_class <- lapply(cff_refs, function(x) { - class(x) <- unique(c("cff_ref", "cff", class(x))) + class(x) <- c("cff_ref", "cff") x }) - class(cff_refs_class) <- c("cff_ref_list", "cff", "list") + class(cff_refs_class) <- c("cff_ref_list", "cff") cff_refs_class } @@ -174,24 +174,24 @@ rapply_class <- function(x) { if (guess == "cff_pers_list") { xelement <- lapply(xelement, function(j) { j_in <- j - class(j_in) <- c("cff_pers", "cff", "list") + class(j_in) <- c("cff_pers", "cff") j_in }) - class(xelement) <- c("cff_pers_list", "cff", "list") + class(xelement) <- c("cff_pers_list", "cff") } if (guess == "cff_ref_list") { xelement <- lapply(xelement, function(j) { j_in <- rapply_class(j) - class(j_in) <- c("cff_ref", "cff", "list") + class(j_in) <- c("cff_ref", "cff") j_in }) - class(xelement) <- c("cff_ref_list", "cff", "list") + class(xelement) <- c("cff_ref_list", "cff") } if (guess %in% c("cff_ref", "cff_pers")) { xin <- rapply_class(xelement) - class(xin) <- c(guess, "cff", "list") + class(xin) <- c(guess, "cff") xelement <- xin } return(xelement) @@ -227,19 +227,19 @@ new_cff <- function(x) { if (guess_x == "cff_ref_list") { x2 <- lapply(x, function(j) { j2 <- rapply_class(j) - class(j2) <- c("cff_ref", "cff", "list") + class(j2) <- c("cff_ref", "cff") j2 }) - class(x2) <- c(guess_x, "cff", "list") + class(x2) <- c(guess_x, "cff") return(x2) } xend <- rapply_class(x) final_class <- switch(guess_x, - "cff_full" = c("cff", "list"), - "unclear" = c("cff", "list"), - c(guess_x, "cff", "list") + "cff_full" = "cff", + "unclear" = "cff", + c(guess_x, "cff") ) if (!is.null(final_class)) { diff --git a/R/as_cff_person.R b/R/as_cff_person.R index c3b4fd07..5e2260e4 100644 --- a/R/as_cff_person.R +++ b/R/as_cff_person.R @@ -1,17 +1,19 @@ -#' Create a `person` with the corresponding [`cff`] structure +#' Coerce strings or [person][utils::person] objects to [`cff`] objects #' #' @description #' -#' Create a list of `person` or `entity` as defined by the +#' Create a list of `definitions.person` or `definitions.entity` as defined by +#' the #' #' ```{r, echo=FALSE, results='asis'} #' #' cat(paste0(" [Citation File Format schema]", #' "(https://github.com/citation-file-format/", -#' "citation-file-format/blob/main/schema-guide.md).")) +#' "citation-file-format/blob/main/schema-guide.md) ")) #' #' #' ``` +#' from different sources. #' #' [as_cff_person()] can convert the following objects: #' - Objects with class `person` as provided by [utils::person()]. @@ -19,8 +21,6 @@ #' using the standard BibTeX notation. See Markey (2007) for a full #' explanation. 
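# Hedged sketch (not part of the package sources): assuming the standard
# BibTeX name splitting referenced above (Markey, 2007), i.e.
# "von Family, Given" and "von Family, Junior, Given", strings are expected
# to map to CFF keys roughly as follows.
sketch_particle <- as_cff_person("van Beethoven, Ludwig")
# expected keys: given-names "Ludwig", name-particle "van",
# family-names "Beethoven"
sketch_suffix <- as_cff_person("Davis, Jr., Sammy")
# expected keys additionally include name-suffix "Jr."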
#' -#' [as_cff_person()] would recognize if the input should be converted using the -#' CFF reference `person` or `entity`. #' #' @seealso #' Examples in `vignette("cffr", "cffr")` and [utils::person()]. @@ -38,8 +38,8 @@ #' See **Examples**. #' #' @return -#' `as_cff_person()` returns a list of persons or entities with class `cff` and -#' subclass `cff_pers_list`, converted to the +#' `as_cff_person()` returns an object of classes `"cff_pers_list", "cff"` +#' according to the #' ```{r, echo=FALSE, results='asis'} #' #' cat(paste0(" [Citation File Format schema]", @@ -48,11 +48,15 @@ #' #' #' ``` -#' Each element of the `cff_pers_list` would have a class `cff` and a subclass -#' `cff_pers`. +#' Each element of the `"cff_pers_list", "cff"` object would have classes +#' `"cff_pers", "cff"`. Learn more about the \CRANpkg{cffr} class system in +#' [cff_class]. #' #' @details #' +#' [as_cff_person()] would recognize if the input should be converted using the +#' CFF reference for `definition.person` or `definition.entity`. +#' #' `as_cff_person()` uses a custom algorithm that tries to break a name as #' explained in Section 11 of "Tame the BeaST" (Markey, 2007) (see also #' Decoret, 2007): @@ -109,11 +113,10 @@ #' #' cff_person <- as_cff_person(a_person) #' -#' # Class cff and a special subclass +#' # Class cff_pers_list / cff #' class(cff_person) #' -#' # With each element with other special subclass -#' +#' # With each element with class cff_pers / cff #' class(cff_person[[1]]) #' #' # Print @@ -130,9 +133,21 @@ #' as_cff_person(a_str) #' #' # Several persons -#' persons <- c(person("Clark", "Kent"), person("Lois", "Lane")) +#' persons <- c( +#' person("Clark", "Kent", comment = c(affiliation = "Daily Planet")), +#' person("Lois", "Lane"), person("Oscorp Inc.") +#' ) +#' +#' a_cff <- as_cff_person(persons) +#' +#' a_cff +#' +#' # Printed as Bibtex thanks to the method +#' toBibtex(a_cff) +#' +#' # Or as person object +#' as.person(a_cff) #' -#' as_cff_person(persons) #' #' # Or you can use BibTeX style if you prefer #' @@ -141,6 +156,8 @@ #' as_cff_person(x) #' #' as_cff_person("Herbert von Karajan") +#' +#' toBibtex(as_cff_person("Herbert von Karajan")) as_cff_person <- function(person) { if (any(is.null(person), is.na(person), length(person) == 0)) { return(NULL) @@ -333,141 +350,3 @@ create_person_from_txt <- function(as_bib_text) { pers_cff <- validate_cff_person_fields(pers_cff) pers_cff } - -guess_hint <- function(person) { - if (inherits(person, "person")) { - return("person") - } - - # Rest of cases "txt" - return("txt") -} - - -split_txt_persons <- function(person) { - person <- trimws(person) - person <- paste0(person, collapse = " and ") - - # Remove role on [] as it comes from print.person by default - # We don't use it here - person <- gsub("\\[[^()]*\\]", "", person) - - # Protect 'and' on brackets {} - # Lower - protected <- gsub("(and)(?![^\\}]*(\\{|$))", "@nd@", - person, - perl = TRUE - ) - - # upper - protected <- gsub("AND(?![^\\}]*(\\{|$))", "@ND@", - protected, - perl = TRUE - ) - - # Do the same for 'and' in comments "()" as provided by print.person - # Lower - protected <- gsub("(and)(?![^\\)]*(\\(|$))", "@nd@", - protected, - perl = TRUE - ) - - # upper - protected <- gsub("AND(?![^\\)]*(\\(|$))", "@ND@", - protected, - perl = TRUE - ) - - # Do the same for 'and' in "<>". 
These are email, should never happen - # Lower - protected <- gsub("(and)(?![^>]*(<|$))", "@nd@", - protected, - perl = TRUE - ) - - # upper - protected <- gsub("AND(?![^>]*(<|$))", "@ND@", - protected, - perl = TRUE - ) - - auths <- unlist(strsplit(protected, " and | AND ")) - - # Unprotec - auths_un <- gsub("@nd@", "and", auths) - auths_un <- gsub("@ND@", "AND", auths_un) - - auths_un -} - -extract_person_comments <- function(person) { - # Ensure person type - person <- as.person(person) - - # Extract from comments - comm_cff <- as.list(person$comment) - names(comm_cff) <- tolower(names(comm_cff)) - nms_com <- names(comm_cff) - comment_as_text <- tolower(clean_str(comm_cff)) - - # Special case when coerced from text, only can extract orcid and web - if (all( - any(is.na(nms_com), length(nms_com) == 0), - length(comment_as_text > 0) - ) - ) { - split_comments <- unlist(strsplit(comment_as_text, ",| |<|>")) - - # Guess that seems to be a web - url_comment <- split_comments[is_url(split_comments)] - - # guess orcid - orcid <- url_comment[grepl("orcid.org/", url_comment)] - - # Get the first non-orcid url - web <- url_comment[!grepl("orcid.org/", url_comment)][1] - - # Reset comment list - comm_cff <- list() - - comm_cff$orcid <- clean_str(orcid) - comm_cff$website <- clean_str(web) - } - - # Add url to orcid if not present - # Get leading invalid urls - - if (!is.null(comm_cff$orcid)) { - orcid <- gsub("^orcid.org/", "", comm_cff$orcid) - orcid <- gsub("^https://orcid.org/", "", orcid) - orcid <- gsub("^http://orcid.org/", "", orcid) - - comm_cff$orcid <- paste0("https://orcid.org/", orcid) - } - - # Add website - web <- comm_cff$website - - if (!is.null(web)) { - comm_cff$website <- clean_str(web[is_url(web)]) - } - - # Add also email - # Check if several mails (MomTrunc 6.0) - look_emails <- c(unlist(person$email), comm_cff$email) - valid_emails <- unlist(lapply(look_emails, is_email)) - email <- look_emails[valid_emails][1] - - # Final list - fin_list <- c( - list(email = NULL), - comm_cff["email" != names(comm_cff)] - ) - fin_list$email <- clean_str(email) - - fin_list -} - -protect_bib_braces <- function(x) { - paste0("{", x, "}") -} diff --git a/R/cff.R b/R/cff.R index 3d619158..c2a3629d 100644 --- a/R/cff.R +++ b/R/cff.R @@ -1,7 +1,7 @@ #' Read and manipulate `cff` objects #' #' A class and utility methods for reading, creating and holding CFF -#' information. See [`cff-class`] to learn more about `cff` objects. +#' information. See [`cff_class`] to learn more about `cff` objects. #' #' @rdname cff #' @name cff diff --git a/R/docs.R b/R/docs.R index a9db5e3c..0023b567 100644 --- a/R/docs.R +++ b/R/docs.R @@ -1,6 +1,6 @@ #' The `cff` class #' -#' @name cff-class +#' @name cff_class #' @keywords internal #' #' @family s3method diff --git a/R/cff-methods.R b/R/methods.R similarity index 58% rename from R/cff-methods.R rename to R/methods.R index 519a61bf..ca9fb142 100644 --- a/R/cff-methods.R +++ b/R/methods.R @@ -60,42 +60,6 @@ as.data.frame.cff <- function(x, row.names = NULL, optional = FALSE, ...) { # nolint end -#' @rdname as_cff_person -#' @name as.person.cff -#' @order 3 -#' -#' @description -#' -#' The inverse transformation (`cff` person to [`person`][utils::as.person()]) -#' object can be done through the [as.person.cff()] method. Note that this is -#' expected to be used with a `cff` person, not with a complete `cff` object. -#' -#' -#' @family s3method -#' @export -#' @seealso [utils::person()] -#' -#' @param x `cff` object representing a person or entity. 
-#' -#' @return -#' -#' `as.person.cff()` returns a `person` object. -as.person.cff <- function(x) { - # If single enclose on a list - is_single <- any(grepl("^name$|^given-names|^family-names", names(x))) - - if (is_single) x <- list(x) - - - pers <- lapply(x, make_r_person) - - # If not all extracted, malformed, return null - if (!all(lengths(pers) > 0)) { - return(person()) - } - do.call(c, pers) -} - #' Head #' #' @noRd @@ -139,15 +103,18 @@ as.list.cff <- function(x, ...) { #' @order 2 #' #' @description -#' Additionally, it is also provided a method for [toBibtex()], that can -#' convert [`cff`] objects to `Bibtex` objects as provided by -#' [utils::toBibtex()]. These objects are character vectors with BibTeX markup. +#' +#' `toBibtex.cff()` method can convert [`cff`] objects to `Bibtex` objects on +#' the fly, see **Examples**. #' #' @family s3method #' @export -#' @seealso [utils::toBibtex()] +#' @seealso +#' +#' [utils::toBibtex()] to get more information about the `toBibtex.cff()` +#' method. #' -#' @param object `cff` object. +#' @param object For `toBibtex.cff()` a [`cff`] object. #' @param ... Arguments passed to [utils::toBibtex()]. #' #' @return @@ -156,19 +123,89 @@ as.list.cff <- function(x, ...) { #' markup. toBibtex.cff <- function(object, ..., what = c("preferred", "references", "all")) { - # If a single reference... - if ("cff-version" %in% names(object)) { - # If full cff - biblist_cff <- as_bibentry(x = object, what = what) - } else { - # Need to enlist if single - if ("type" %in% names(object)) { - object <- list(object) - class(object) <- c("cff", "list") - } + toBibtex(as_bibentry(object, what), ...) +} - bib_list <- lapply(object, make_bibentry) - biblist_cff <- do.call(c, bib_list) +#' @rdname as_bibentry +#' @order 3 +#' @export +toBibtex.cff_pers_list <- function(object, ...) { + toBibtex(as.person(object), ...) +} + +#' @rdname as_bibentry +#' @order 4 +#' @export +toBibtex.cff_pers <- function(object, ...) { + toBibtex(as.person(object), ...) +} + +#' @rdname as_cff_person +#' @order 2 +#' +#' @description +#' +#' The inverse transformation (`cff_pers / cff_pers_list` to +#' [`person`][utils::as.person()]) object can be done through the +#' `as.person.cff_pers()` / `as.person().cff_pers_list()` methods. +#' +#' The output also can printed on BibTeX markup via the corresponding +#' [`toBibtex()`][toBibtex.cff_pers] methods. +#' +#' +#' +#' @family s3method +#' @export +#' +#' @param x A `cff_pers` or `cff_pers_list` object. +#' +#' @return +#' +#' `as.person.cff_pers()` / `as.person.cff_pers_list()` returns a +#' [`person`][utils::person] object. +#' +as.person.cff_pers <- function(x) { + # Enlist to dispatch to Next method + x_l <- list(as.list(x)) + as.person(as_cff(x_l)) +} + +#' @rdname as_cff_person +#' @order 3 +#' @export +as.person.cff_pers_list <- function(x) { + pers <- lapply(x, make_r_person) + + # If not all extracted, malformed, return null + if (!all(lengths(pers) > 0)) { + return(person()) } - toBibtex(biblist_cff, ...) + do.call(c, pers) +} + + +# as.person methods not implemented ---- + +#' @export +#' @noRd +as.person.cff <- function(x) { + cli::cli_abort( + "({.pkg cffr}) {.fn as.person.cff} method not implemented yet." + ) +} + +#' @export +#' @noRd +as.person.cff_ref <- function(x) { + cli::cli_abort( + "({.pkg cffr}) {.fn as.person.cff_ref} method not implemented yet." + ) +} + +#' @export +#' @noRd +as.person.cff_ref_list <- function(x) { + cli::cli_abort( + "({.pkg cffr}) {.fn as.person.cff_ref_list} method not implemented yet." 
+ ) } diff --git a/R/utils-bib.R b/R/utils-bib.R new file mode 100644 index 00000000..ac29300e --- /dev/null +++ b/R/utils-bib.R @@ -0,0 +1,250 @@ +protect_bib_braces <- function(x) { + paste0("{", x, "}") +} + +# Utils for as_bibentry ---- +get_bib_howpublised <- function(x) { + howpublished <- x$medium + + if (!is.null(howpublished)) { + f <- toupper(substr(howpublished, 0, 1)) + rest <- substr(howpublished, 2, nchar(howpublished)) + howpublished <- paste0(c(f, rest), collapse = "") + } + + clean_str(howpublished) +} + +get_bib_note <- function(x) { + note <- x$notes + + # unpublished needs a note + if (all(is.null(note), tolower(x$type) == "unpublished")) { + note <- "Extracted with cffr R package" + } + + clean_str(note) +} + +guess_bibtype <- function(x) { + init_guess <- switch(tolower(x$type), + "article" = "article", + "book" = "book", + "manual" = "manual", + "unpublished" = "unpublished", + "conference" = "inproceedings", + "conference-paper" = "inproceedings", + "proceedings" = "proceedings", + "magazine-article" = "article", + "newspaper-article" = "article", + "pamphlet" = "booklet", + "report" = "techreport", + "thesis" = "mastersthesis", + # We would need to guess + "misc" + ) + + # Try guess Inbook ---- + # inbook is a book where chapter or pages are present + has_chapter <- !is.null(clean_str(x$section)) + has_pages <- !is.null( + clean_str(paste(unique(c(x$start, x$end)), collapse = "--")) + ) + + if (all(init_guess == "book", any(has_chapter, has_pages))) { + init_guess <- "inbook" + return(init_guess) + } + + # Try guess Phdthesis ---- + if (init_guess == "mastersthesis") { + ttype <- clean_str(gsub("[[:punct:]]", "", x$`thesis-type`, perl = TRUE)) + # phd + if (all(!is.null(ttype), grepl("phd", ttype, ignore.case = TRUE))) { + return("phdthesis") + } + } + + # Try guess InCollection ---- + # Hint: is misc with collection-title and publisher + if (init_guess == "misc") { + if (!is.null(clean_str(x$`collection-title`))) { + return("incollection") + } + } + + init_guess +} + +get_bib_address <- function(x) { + # BibTeX 'address' is taken from the publisher (book, others) or the + # conference (inproceedings). + # Set logic: conference > institution > publisher + if (!is.null(x$conference)) { + addr_search <- x$conference + } else if (!is.null(x$institution)) { + addr_search <- x$institution + } else { + addr_search <- x$publisher + } + + address <- clean_str( + paste(c( + addr_search$address, addr_search$city, addr_search$region, + addr_search$country + ), collapse = ", ") + ) + + # As a fallback, use also location + if (is.null(address) && !is.null(x$location)) { + address <- clean_str(x$location$name) + } + + address +} + +get_bib_booktitle <- function(x, bibtype) { + # This map collection title. 
+ # If inproceedings, incollection to booktitle + # rest of cases to series + + book_series <- list() + tag_value <- clean_str(x[["collection-title"]]) + + + if (!bibtype %in% c("incollection", "inproceedings")) { + book_series$series <- tag_value + } else { + # Only for incollections and inproceedings map booktitle + book_series$booktitle <- tag_value + + # Fallback to conference name for inproceedings + if (all(bibtype == "inproceedings", is.null(tag_value))) { + book_series$booktitle <- clean_str(x$conference$name) + } + } + book_series +} + +get_bib_inst_org <- function(x, bibtype) { + # For inproceedings, proceedings and manual this field + # is organization + # For thesis it should be school + + inst_org <- list() + # Just name + inst_name <- clean_str(x$institution$name) + + if (bibtype %in% c("inproceedings", "proceedings", "manual")) { + inst_org$organization <- inst_name + } else if (grepl("thesis", bibtype, ignore.case = TRUE)) { + inst_org$school <- inst_name + } else { + inst_org$institution <- inst_name + } + + # Fallback for techreport, search on affiliation first author + + if (bibtype == "techreport" && is.null(inst_org$institution)) { + inst_org$institution <- clean_str(x$authors[[1]]$affiliation) + } + + inst_org +} + +make_bibkey <- function(tobibentry) { + # Be kind and provided a bibentry key + + y <- tobibentry$year + + + # Init etall + etall <- NULL + + + # Also Some entries don't have authors, but editors + # Others may have none (misc, pamphlet) + + init_aut <- tobibentry$author + + # Try get editor in null + if (is.null(init_aut)) { + init_aut <- tobibentry$editor + } + + # If none then get a key based in title + if (any(is.null(init_aut), length(init_aut) == 0)) { + r <- tolower(tobibentry$title) + # Reduce lenght to 15 max + r <- substr(r, 0, 15) + } else { + # First name/surname and et_all if additional authors + # Bear in mind institutions has only given + + nauths <- length(init_aut) + if (nauths > 1) etall <- "_etall" + + + # Get info of first author + unz <- unlist(init_aut[1]) + if ("family" %in% names(unz)) { + r <- unz["family"] + } else { + r <- unz["given"] + } + + r <- tolower(paste0(r, collapse = "")) + } + + # Try hard to remove accents + # First with iconv + r <- iconv(r, from = "UTF-8", to = "ASCII//TRANSLIT", sub = "?") + + # Next to latex + r <- encoded_utf_to_latex(r) + + # Finally keep only a-z letters for key + r <- gsub("[^_a-z]", "", r) + + # Append etall and year + key <- paste0(c(r, etall), collapse = "") + key <- paste(c(key, y), collapse = ":") + key +} + + +get_bib_month <- function(x) { + m <- x$month + + # Fallback + + if (is.null(m) && !is.null(x$`date-published`)) { + # Should be YYYY-MM-DD to be valid on cff, so + m <- as.integer(format(as.Date(x$`date-published`), "%m")) + } + + # Try to get 3 month string + m_int <- suppressWarnings(as.integer(m)) + m_letters <- clean_str(tolower(month.abb[m_int])) + + if (!is.null(m_letters)) { + month <- m_letters + } else { + month <- clean_str(m) + } + + month +} + +get_bib_year <- function(x) { + year <- x$year + + # Fallback + + if (is.null(year) && !is.null(x$`date-released`)) { + # Should be YYYY-MM-DD to be valid on cff, so + year <- substr(x$`date-released`, 1, 4) + } + + clean_str(year) +} diff --git a/R/utils-persons.R b/R/utils-persons.R index d45f389b..b929f302 100644 --- a/R/utils-persons.R +++ b/R/utils-persons.R @@ -279,3 +279,137 @@ validate_cff_person_fields <- function(parsed_person) { parsed_person } + +guess_hint <- function(person) { + if (inherits(person, "person")) { + 
return("person") + } + + # Rest of cases "txt" + return("txt") +} + + +split_txt_persons <- function(person) { + person <- trimws(person) + person <- paste0(person, collapse = " and ") + + # Remove role on [] as it comes from print.person by default + # We don't use it here + person <- gsub("\\[[^()]*\\]", "", person) + + # Protect 'and' on brackets {} + # Lower + protected <- gsub("(and)(?![^\\}]*(\\{|$))", "@nd@", + person, + perl = TRUE + ) + + # upper + protected <- gsub("AND(?![^\\}]*(\\{|$))", "@ND@", + protected, + perl = TRUE + ) + + # Do the same for 'and' in comments "()" as provided by print.person + # Lower + protected <- gsub("(and)(?![^\\)]*(\\(|$))", "@nd@", + protected, + perl = TRUE + ) + + # upper + protected <- gsub("AND(?![^\\)]*(\\(|$))", "@ND@", + protected, + perl = TRUE + ) + + # Do the same for 'and' in "<>". These are email, should never happen + # Lower + protected <- gsub("(and)(?![^>]*(<|$))", "@nd@", + protected, + perl = TRUE + ) + + # upper + protected <- gsub("AND(?![^>]*(<|$))", "@ND@", + protected, + perl = TRUE + ) + + auths <- unlist(strsplit(protected, " and | AND ")) + + # Unprotec + auths_un <- gsub("@nd@", "and", auths) + auths_un <- gsub("@ND@", "AND", auths_un) + + auths_un +} + +extract_person_comments <- function(person) { + # Ensure person type + person <- as.person(person) + + # Extract from comments + comm_cff <- as.list(person$comment) + names(comm_cff) <- tolower(names(comm_cff)) + nms_com <- names(comm_cff) + comment_as_text <- tolower(clean_str(comm_cff)) + + # Special case when coerced from text, only can extract orcid and web + if (all( + any(is.na(nms_com), length(nms_com) == 0), + length(comment_as_text > 0) + ) + ) { + split_comments <- unlist(strsplit(comment_as_text, ",| |<|>")) + + # Guess that seems to be a web + url_comment <- split_comments[is_url(split_comments)] + + # guess orcid + orcid <- url_comment[grepl("orcid.org/", url_comment)] + + # Get the first non-orcid url + web <- url_comment[!grepl("orcid.org/", url_comment)][1] + + # Reset comment list + comm_cff <- list() + + comm_cff$orcid <- clean_str(orcid) + comm_cff$website <- clean_str(web) + } + + # Add url to orcid if not present + # Get leading invalid urls + + if (!is.null(comm_cff$orcid)) { + orcid <- gsub("^orcid.org/", "", comm_cff$orcid) + orcid <- gsub("^https://orcid.org/", "", orcid) + orcid <- gsub("^http://orcid.org/", "", orcid) + + comm_cff$orcid <- paste0("https://orcid.org/", orcid) + } + + # Add website + web <- comm_cff$website + + if (!is.null(web)) { + comm_cff$website <- clean_str(web[is_url(web)]) + } + + # Add also email + # Check if several mails (MomTrunc 6.0) + look_emails <- c(unlist(person$email), comm_cff$email) + valid_emails <- unlist(lapply(look_emails, is_email)) + email <- look_emails[valid_emails][1] + + # Final list + fin_list <- c( + list(email = NULL), + comm_cff["email" != names(comm_cff)] + ) + fin_list$email <- clean_str(email) + + fin_list +} diff --git a/inst/WORDLIST b/inst/WORDLIST index 83eac8ca..b58c68cd 100644 --- a/inst/WORDLIST +++ b/inst/WORDLIST @@ -23,9 +23,6 @@ Fenner GitLab Haines Hernangomez -InBook -InCollection -InProceedings Integrations JOSS Jurriaan @@ -35,7 +32,6 @@ Lamport Leoncio Lifecycle Markey -MastersThesis Mayes Maëlle Niemeyer @@ -43,7 +39,6 @@ ORCID Oren Pandoc Patashnik -PhDThesis Philipp Pérez README @@ -55,7 +50,6 @@ Riederer SPDX Spaaks Suárez -TechReport Waldir Willighagen YAML diff --git a/man/as_bibentry.Rd b/man/as_bibentry.Rd index 70106729..cf2d8392 100644 --- a/man/as_bibentry.Rd +++ 
b/man/as_bibentry.Rd @@ -1,19 +1,25 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/as_bibentry.R, R/cff-methods.R +% Please edit documentation in R/as_bibentry.R, R/methods.R \name{as_bibentry} \alias{as_bibentry} \alias{toBibtex.cff} +\alias{toBibtex.cff_pers_list} +\alias{toBibtex.cff_pers} \title{Create \code{bibentry} objects from several sources} \usage{ as_bibentry(x, what = c("preferred", "references", "all")) \method{toBibtex}{cff}(object, ..., what = c("preferred", "references", "all")) + +\method{toBibtex}{cff_pers_list}(object, ...) + +\method{toBibtex}{cff_pers}(object, ...) } \arguments{ \item{x}{The source that would be used for generating the \code{\link[=bibentry]{bibentry()}} object via \CRANpkg{cffr}. It could be: \itemize{ -\item A missing value. That would retrieve the DESCRIPTION +\item A missing value. That would retrieve the \code{DESCRIPTION} file on your in-development package. \item An existing \code{cff} object created with \code{\link[=cff]{cff()}}, \code{\link[=cff_create]{cff_create()}} or \code{\link[=as_cff]{as_cff()}}. @@ -31,7 +37,7 @@ info of the package. both the preferred citation info and the references. }} -\item{object}{\code{cff} object.} +\item{object}{For \code{toBibtex.cff()} a \code{\link{cff}} object.} \item{...}{Arguments passed to \code{\link[utils:toLatex]{utils::toBibtex()}}.} } @@ -43,25 +49,28 @@ objects). markup. } \description{ -This function creates \code{bibentry} objects (see \code{\link[utils:bibentry]{utils::bibentry()}}) from -different metadata sources (\code{\link{cff}} objects, \code{DESCRIPTION} files, etc.). -Note that a \strong{R} \code{bibentry} object is the representation of a BibTeX entry, -see \strong{Examples} - -The function tries to map the information of the source \code{x} into a \code{\link{cff}} -object and performs a mapping of the metadata to BibTeX, according to -\code{vignette("bibtex_cff", "cffr")}. +This function creates \code{\link[utils:bibentry]{bibentry}} objects from different +metadata sources (\code{\link{cff}} objects, \code{DESCRIPTION} files, etc.). The inverse transformation (\code{bibentry} object to \code{\link{cff}} reference) can -be done with the corresponding \code{\link[=as_cff]{as_cff()}} method. +be done with the corresponding \code{\link[=as_cff.bibentry]{as_cff.bibentry()}} method. + +\code{toBibtex.cff()} method can convert \code{\link{cff}} objects to \code{Bibtex} objects on +the fly, see \strong{Examples}. +} +\details{ +A \strong{R} \code{\link[utils:bibentry]{bibentry}} object is the representation of a +BibTeX entry. These objects can be converted to BibTeX markup with +\code{\link[utils:toLatex]{utils::toBibtex()}}, that creates an object of class \code{Bibtex} and can be +printed and exported as a valid BibTeX entry. -Additionally, it is also provided a method for \code{\link[=toBibtex]{toBibtex()}}, that can -convert \code{\link{cff}} objects to \code{Bibtex} objects as provided by -\code{\link[utils:toLatex]{utils::toBibtex()}}. These objects are character vectors with BibTeX markup. +\code{as_bibtex()} tries to map the information of the source \code{x} into a \code{\link{cff}} +object and performs a mapping of the metadata to BibTeX, according to +\code{vignette("bibtex_cff", "cffr")}. 
} \examples{ \donttest{ -# From a cff object +# From a cff object ---- cff_object <- cff() cff_object @@ -74,29 +83,26 @@ class(bib) bib # Print as bibtex - toBibtex(bib) # Thanks to the S3 Method we can also do - toBibtex(cff_object) -# From a CITATION.cff file with options +# Other sources ---- +# From a CITATION.cff path <- system.file("examples/CITATION_complete.cff", package = "cffr") -cff_file <- as_bibentry(path, what = "all") +cff_file <- as_bibentry(path) -toBibtex(cff_file) +cff_file -# For an installed package +# For an installed package with options +installed_package <- as_bibentry("jsonvalidate", what = "all") -installed_package <- as_bibentry("jsonvalidate") - -toBibtex(installed_package) +installed_package # Use a DESCRIPTION file - path2 <- system.file("examples/DESCRIPTION_gitlab", package = "cffr") desc_file <- as_bibentry(path2) @@ -116,9 +122,14 @@ toBibtex(desc_file) } } \seealso{ -\code{\link[utils:bibentry]{utils::bibentry()}} +\code{\link[utils:bibentry]{utils::bibentry()}} to understand more about the \code{bibentry} class. + +\code{vignette("bibtex_cff", "cffr")} provides detailed information about the +internal mapping performed between \code{cff} objects and BibTeX markup ( +both \code{cff} to BibTeX and BibTeX to \code{cff}). -\code{\link[utils:toLatex]{utils::toBibtex()}} +\code{\link[utils:toLatex]{utils::toBibtex()}} to get more information about the \code{toBibtex.cff()} +method. Other functions for working with BibTeX format: \code{\link{cff_read}()}, @@ -133,7 +144,7 @@ Other functions for converting between \strong{R} classes: Other S3 Methods for \code{cff}: \code{\link{as_cff}()}, \code{\link{as_cff_person}()}, -\code{\link{cff-class}} +\code{\link{cff_class}} } \concept{bibtex} \concept{coercing} diff --git a/man/as_cff.Rd b/man/as_cff.Rd index 74bf9ee7..a11027f6 100644 --- a/man/as_cff.Rd +++ b/man/as_cff.Rd @@ -96,7 +96,7 @@ Other functions for converting between \strong{R} classes: Other S3 Methods for \code{cff}: \code{\link{as_bibentry}()}, \code{\link{as_cff_person}()}, -\code{\link{cff-class}} +\code{\link{cff_class}} } \concept{coercing} \concept{s3method} diff --git a/man/as_cff_person.Rd b/man/as_cff_person.Rd index 3aaa09f7..ae9f13f8 100644 --- a/man/as_cff_person.Rd +++ b/man/as_cff_person.Rd @@ -1,13 +1,16 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/as_cff_person.R, R/cff-methods.R +% Please edit documentation in R/as_cff_person.R, R/methods.R \name{as_cff_person} \alias{as_cff_person} -\alias{as.person.cff} -\title{Create a \code{person} with the corresponding \code{\link{cff}} structure} +\alias{as.person.cff_pers} +\alias{as.person.cff_pers_list} +\title{Coerce strings or \link[utils:person]{person} objects to \code{\link{cff}} objects} \usage{ as_cff_person(person) -\method{as.person}{cff}(x) +\method{as.person}{cff_pers}(x) + +\method{as.person}{cff_pers_list}(x) } \arguments{ \item{person}{It can be either: @@ -17,19 +20,23 @@ as_cff_person(person) See \strong{Examples}. }} -\item{x}{\code{cff} object representing a person or entity.} +\item{x}{A \code{cff_pers} or \code{cff_pers_list} object.} } \value{ -\code{as_cff_person()} returns a list of persons or entities with class \code{cff} and -subclass \code{cff_pers_list}, converted to the \href{https://github.com/citation-file-format/citation-file-format/blob/main/schema-guide.md}{Citation File Format schema}. -Each element of the \code{cff_pers_list} would have a class \code{cff} and a subclass -\code{cff_pers}. 
-
-\code{as.person.cff()} returns a \code{person} object.
+\code{as_cff_person()} returns an object of classes \verb{"cff_pers_list", "cff"}
+according to the \href{https://github.com/citation-file-format/citation-file-format/blob/main/schema-guide.md}{Citation File Format schema}.
+Each element of the \verb{"cff_pers_list", "cff"} object would have classes
+\verb{"cff_pers", "cff"}. Learn more about the \CRANpkg{cffr} class system in
+\link{cff_class}.
+
+\code{as.person.cff_pers()} / \code{as.person.cff_pers_list()} returns a
+\code{\link[utils:person]{person}} object.
 }
 \description{
-Create a list of \code{person} or \code{entity} as defined by the
-\href{https://github.com/citation-file-format/citation-file-format/blob/main/schema-guide.md}{Citation File Format schema}.
+Create a list of \code{definitions.person} or \code{definitions.entity} as defined by
+the
+\href{https://github.com/citation-file-format/citation-file-format/blob/main/schema-guide.md}{Citation File Format schema}
+from different sources.

 \code{\link[=as_cff_person]{as_cff_person()}} can convert the following objects:
 \itemize{
@@ -39,14 +46,17 @@ using the standard BibTeX notation. See
 Markey (2007) for a full explanation.
 }

-\code{\link[=as_cff_person]{as_cff_person()}} would recognize if the input should be converted using the
-CFF reference \code{person} or \code{entity}.
+The inverse transformation (\code{cff_pers / cff_pers_list} to a
+\code{\link[utils:person]{person}} object) can be done through the
+\code{as.person.cff_pers()} / \code{as.person.cff_pers_list()} methods.

-The inverse transformation (\code{cff} person to \code{\link[utils:person]{person}})
-object can be done through the \code{\link[=as.person.cff]{as.person.cff()}} method. Note that this is
-expected to be used with a \code{cff} person, not with a complete \code{cff} object.
+The output can also be printed as BibTeX markup via the corresponding
+\code{\link[=toBibtex.cff_pers]{toBibtex()}} methods.
 }
 \details{
+\code{\link[=as_cff_person]{as_cff_person()}} would recognize if the input should be converted using the
+CFF reference for \code{definitions.person} or \code{definitions.entity}.
+
 \code{as_cff_person()} uses a custom algorithm that tries to break a name as
 explained in Section 11 of "Tame the BeaST" (Markey, 2007) (see also
 Decoret, 2007):
@@ -98,11 +108,10 @@ a_person

 cff_person <- as_cff_person(a_person)

-# Class cff and a special subclass
+# Class cff_pers_list / cff
 class(cff_person)

-# With each element with other special subclass
-
+# With each element with class cff_pers / cff
 class(cff_person[[1]])

 # Print
@@ -119,9 +128,21 @@ a_str <- paste0(
 as_cff_person(a_str)

 # Several persons
-persons <- c(person("Clark", "Kent"), person("Lois", "Lane"))
+persons <- c(
+  person("Clark", "Kent", comment = c(affiliation = "Daily Planet")),
+  person("Lois", "Lane"), person("Oscorp Inc.")
+)
+
+a_cff <- as_cff_person(persons)
+
+a_cff
+
+# Printed as Bibtex thanks to the method
+toBibtex(a_cff)
+
+# Or as person object
+as.person(a_cff)

-as_cff_person(persons)


 # Or you can use BibTeX style if you prefer

@@ -130,6 +151,8 @@ x <- "Frank Sinatra and Dean Martin and Davis, Jr., Sammy and Joey Bishop"

 as_cff_person(x)

 as_cff_person("Herbert von Karajan")
+
+toBibtex(as_cff_person("Herbert von Karajan"))
 }
 \references{
 \itemize{
@@ -146,8 +169,6 @@ See \strong{Examples} for more information.
 \seealso{
 Examples in \code{vignette("cffr", "cffr")} and \code{\link[utils:person]{utils::person()}}.
-\code{\link[utils:person]{utils::person()}} - Other functions for converting between \strong{R} classes: \code{\link{as_bibentry}()}, \code{\link{as_cff}()} @@ -155,7 +176,7 @@ Other functions for converting between \strong{R} classes: Other S3 Methods for \code{cff}: \code{\link{as_bibentry}()}, \code{\link{as_cff}()}, -\code{\link{cff-class}} +\code{\link{cff_class}} } \concept{coercing} \concept{s3method} diff --git a/man/cff.Rd b/man/cff.Rd index 46cfd6db..916e97c7 100644 --- a/man/cff.Rd +++ b/man/cff.Rd @@ -19,7 +19,7 @@ with a special \code{\link[=print]{print()}} method. } \description{ A class and utility methods for reading, creating and holding CFF -information. See \code{\linkS4class{cff}} to learn more about \code{cff} objects. +information. See \code{\link{cff_class}} to learn more about \code{cff} objects. } \details{ If no additional \code{...} parameters are supplied (the default behavior), diff --git a/man/cff-class.Rd b/man/cff_class.Rd similarity index 97% rename from man/cff-class.Rd rename to man/cff_class.Rd index 4c923674..8d5ed828 100644 --- a/man/cff-class.Rd +++ b/man/cff_class.Rd @@ -1,7 +1,7 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/docs.R -\name{cff-class} -\alias{cff-class} +\name{cff_class} +\alias{cff_class} \title{The \code{cff} class} \description{ \subsection{The \code{cff} class}{ @@ -49,7 +49,7 @@ a_named_list a_cff_object <- as_cff(a_named_list) class(a_cff_object) -#> [1] "cff" "list" +#> [1] "cff" a_cff_object #> first: I @@ -61,8 +61,7 @@ a_cff_object dput(a_cff_object) #> structure(list(first = "I", second = "am", third = "a", fourth = "list", -#> fifth = "with", sixth = "names"), class = c("cff", "list" -#> )) +#> fifth = "with", sixth = "names"), class = "cff") }\if{html}{\out{}} \code{\link[=as_cff]{as_cff()}} not only converts a \code{list} to \code{cff} but also removes items (known diff --git a/man/cff_read_bib_text.Rd b/man/cff_read_bib_text.Rd index f101d62c..bebb9aa7 100644 --- a/man/cff_read_bib_text.Rd +++ b/man/cff_read_bib_text.Rd @@ -18,7 +18,7 @@ cff_read_bib_text(x, encoding = "UTF-8", ...) }} } \value{ -A \code{\link[=cff-class]{cff}} object ready to be used with other functions (i.e. +A \code{\link[=cff_class]{cff}} object ready to be used with other functions (i.e. \code{\link[=cff_create]{cff_create()}}. } \description{ diff --git a/man/chunks/value.Rmd b/man/chunks/value.Rmd index 5d8ae303..64d25fb7 100644 --- a/man/chunks/value.Rmd +++ b/man/chunks/value.Rmd @@ -1,2 +1,2 @@ -A [`cff`][cff-class] object ready to be used with other functions (i.e. +A [`cff`][cff_class] object ready to be used with other functions (i.e. [cff_create()]. diff --git a/man/deprecated_cff_to_bib.Rd b/man/deprecated_cff_to_bib.Rd index 1cff5cd4..27638aa9 100644 --- a/man/deprecated_cff_to_bib.Rd +++ b/man/deprecated_cff_to_bib.Rd @@ -13,7 +13,7 @@ cff_to_bibtex(x, what = c("preferred", "references", "all")) \item{x}{The source that would be used for generating the \code{\link[=bibentry]{bibentry()}} object via \CRANpkg{cffr}. It could be: \itemize{ -\item A missing value. That would retrieve the DESCRIPTION +\item A missing value. That would retrieve the \code{DESCRIPTION} file on your in-development package. \item An existing \code{cff} object created with \code{\link[=cff]{cff()}}, \code{\link[=cff_create]{cff_create()}} or \code{\link[=as_cff]{as_cff()}}. 
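cff_to_bibtex() documented above is now deprecated in favour of the as_bibentry() methods: it keeps working but warns and points to the replacement. A minimal migration sketch, where "jsonlite" is only an illustrative installed package and any of the sources listed above behaves the same way:

    # Before (still works, but signals deprecation)
    bib <- cff_to_bibtex("jsonlite", what = "preferred")

    # After
    bib <- as_bibentry("jsonlite", what = "preferred")
    toBibtex(bib)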
diff --git a/pkgdown/_pkgdown.yml b/pkgdown/_pkgdown.yml index ca293aec..4dc8e015 100644 --- a/pkgdown/_pkgdown.yml +++ b/pkgdown/_pkgdown.yml @@ -27,7 +27,7 @@ reference: - subtitle: The `cff` class desc: Brief introduction to the `cff` class and S3 Methods available. contents: - - cff-class + - cff_class - has_concept("s3method") - subtitle: Citation File Format schema desc: >- diff --git a/tests/testthat/_snaps/as_cff.md b/tests/testthat/_snaps/as_cff.md index 3b42a6aa..aafeef4a 100644 --- a/tests/testthat/_snaps/as_cff.md +++ b/tests/testthat/_snaps/as_cff.md @@ -74,30 +74,30 @@ Code df Output - class - authors cff_pers_list|cff|list - contact cff_pers_list|cff|list - identifiers cff_ref_list|cff|list - references cff_ref_list|cff|list - preferred-citation cff_ref|cff|list + class + authors cff_pers_list|cff + contact cff_pers_list|cff + identifiers cff_ref_list|cff + references cff_ref_list|cff + preferred-citation cff_ref|cff --- Code df2 Output - class - authors cff_pers_list|cff|list - contact cff_pers_list|cff|list - editors cff_pers_list|cff|list - editors-series cff_pers_list|cff|list - recipients cff_pers_list|cff|list - senders cff_pers_list|cff|list - translators cff_pers_list|cff|list - conference cff_pers|cff|list - database-provider cff_pers|cff|list - institution cff_pers|cff|list - location cff_pers|cff|list - publisher cff_pers|cff|list - identifiers cff_ref_list|cff|list + class + authors cff_pers_list|cff + contact cff_pers_list|cff + editors cff_pers_list|cff + editors-series cff_pers_list|cff + recipients cff_pers_list|cff + senders cff_pers_list|cff + translators cff_pers_list|cff + conference cff_pers|cff + database-provider cff_pers|cff + institution cff_pers|cff + location cff_pers|cff + publisher cff_pers|cff + identifiers cff_ref_list|cff diff --git a/tests/testthat/_snaps/cff_read_bib_text.md b/tests/testthat/_snaps/cff_read_bib_text.md index 44a59827..8bfc4150 100644 --- a/tests/testthat/_snaps/cff_read_bib_text.md +++ b/tests/testthat/_snaps/cff_read_bib_text.md @@ -4,7 +4,7 @@ cff_read_bib_text(a_cff) Condition Error in `cff_read_bib_text()`: - ! `x` should be a , not a . + ! `x` should be a , not a . --- diff --git a/tests/testthat/_snaps/cff-methods.md b/tests/testthat/_snaps/methods.md similarity index 98% rename from tests/testthat/_snaps/cff-methods.md rename to tests/testthat/_snaps/methods.md index 8de91935..8aa656f5 100644 --- a/tests/testthat/_snaps/cff-methods.md +++ b/tests/testthat/_snaps/methods.md @@ -922,6 +922,30 @@ [1] "One Truly van der Real Person IV (Citey, Excellent University, Niceplace, Arcadia, 22 Acacia Avenue, Citationburgh, Renfrewshire, C13 7X7, GB, , +44(0)141-323 4567, +44(0)141-323 45678, https://www.entity-project-team.io)" [2] "Entity Project Team Conference entity (22 Acacia Avenue, Citationburgh, Renfrewshire, C13 7X7, GB, , +44(0)141-323 4567, +44(0)141-323 45678, https://www.entity-project-team.io, 2017-01-01, 2017-01-31, The team garage)" +# Errors on other as.person methods + + Code + as.person(the_cff) + Condition + Error in `as.person()`: + ! (cffr) `as.person.cff()` method not implemented yet. + +--- + + Code + as.person(key) + Condition + Error in `as.person()`: + ! (cffr) `as.person.cff_ref_list()` method not implemented yet. + +--- + + Code + as.person(key) + Condition + Error in `as.person()`: + ! (cffr) `as.person.cff_ref()` method not implemented yet. 
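The calls behind the three error snapshots above look roughly like this (the path comes from the examples shipped with the package; as.person() is only implemented for the person-like parts of a cff object, such as authors):

    the_cff <- cff_read(system.file("examples/CITATION_complete.cff", package = "cffr"))
    try(as.person(the_cff))       # whole cff object: no method, errors as above
    as.person(the_cff$authors)    # cff_pers_list: supported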
+ # head and tail Code @@ -1055,3 +1079,24 @@ url = {https://ggplot2.tidyverse.org}, } +--- + + Code + toBibtex(sev_auth) + Output + [1] "{The Big Bopper} and Sinatra, Frank and Martin, Dean and Davis Jr., Sammy" + +--- + + Code + toBibtex(single) + Output + [1] "person, A" + +--- + + Code + toBibtex(single) + Output + [1] "{A and B co}" + diff --git a/tests/testthat/test-as_bibentry.R b/tests/testthat/test-as_bibentry.R index c95b961f..7a95ee0b 100644 --- a/tests/testthat/test-as_bibentry.R +++ b/tests/testthat/test-as_bibentry.R @@ -539,6 +539,3 @@ test_that("Fallback month", { x2 <- as_cff(bib2) expect_identical(x2[[1]]$month, "12") }) - - -# Classes ---- diff --git a/tests/testthat/test-as_cff.R b/tests/testthat/test-as_cff.R index 7edfd5e0..35da8836 100644 --- a/tests/testthat/test-as_cff.R +++ b/tests/testthat/test-as_cff.R @@ -6,7 +6,7 @@ test_that("as.cff still works", { expect_silent(l1 <- as_cff(l)) expect_silent(l2 <- as.cff(l)) - expect_s3_class(l1, c("cff", "list"), exact = TRUE) + expect_s3_class(l1, "cff", exact = TRUE) expect_snapshot(l2) }) @@ -23,8 +23,8 @@ test_that("as_cff.person", { ) expect_silent(aa <- as_cff_person(pers)) - expect_s3_class(aa, c("cff_pers_list", "cff", "list"), exact = TRUE) - expect_s3_class(aa[[1]], c("cff_pers", "cff", "list"), exact = TRUE) + expect_s3_class(aa, c("cff_pers_list", "cff"), exact = TRUE) + expect_s3_class(aa[[1]], c("cff_pers", "cff"), exact = TRUE) expect_identical(aa, as_cff_person(pers)) expect_snapshot(as_cff(pers)) @@ -32,7 +32,7 @@ test_that("as_cff.person", { single <- as.list(aa)[[1]] expect_false(inherits(single, "cff")) single_cff <- as_cff(single) - expect_s3_class(single_cff, c("cff_pers", "cff", "list"), exact = TRUE) + expect_s3_class(single_cff, c("cff_pers", "cff"), exact = TRUE) expect_snapshot(single_cff) }) @@ -44,8 +44,8 @@ test_that("as_cff.bibentry, toBibtex", { bbb <- as_cff(b) - expect_s3_class(bbb, c("cff_ref_list", "cff", "list"), exact = TRUE) - expect_s3_class(bbb[[1]], c("cff_ref", "cff", "list"), exact = TRUE) + expect_s3_class(bbb, c("cff_ref_list", "cff"), exact = TRUE) + expect_s3_class(bbb[[1]], c("cff_ref", "cff"), exact = TRUE) expect_snapshot(bbb) b_bib <- toBibtex(b) @@ -59,7 +59,7 @@ test_that("as_cff.bibentry, toBibtex", { b_single <- as.list(bbb)[[1]] expect_false(inherits(b_single, "cff")) b_single_cff <- as_cff(b_single) - expect_s3_class(b_single_cff, c("cff_ref", "cff", "list"), exact = TRUE) + expect_s3_class(b_single_cff, c("cff_ref", "cff"), exact = TRUE) expect_snapshot(b_single_cff) # Check empty @@ -102,12 +102,12 @@ test_that("]] cff_ref", { expect_s3_class(b_all, "bibentry", exact = TRUE) bbb <- as_cff(b_all) - expect_s3_class(bbb, c("cff_ref_list", "cff", "list"), exact = TRUE) + expect_s3_class(bbb, c("cff_ref_list", "cff"), exact = TRUE) expect_length(bbb, 2) b2_reg <- bbb[2] expect_length(b2_reg, 1) - expect_s3_class(b2_reg, c("cff_ref", "cff", "list"), exact = TRUE) + expect_s3_class(b2_reg, c("cff_ref", "cff"), exact = TRUE) }) test_that("]] cff_pers", { @@ -119,12 +119,12 @@ test_that("]] cff_pers", { expect_s3_class(b_all, "person", exact = TRUE) bbb <- as_cff(b_all) - expect_s3_class(bbb, c("cff_pers_list", "cff", "list"), exact = TRUE) + expect_s3_class(bbb, c("cff_pers_list", "cff"), exact = TRUE) expect_length(bbb, 2) b2_reg <- bbb[2] expect_length(b2_reg, 1) - expect_s3_class(b2_reg, c("cff_pers", "cff", "list"), exact = TRUE) + expect_s3_class(b2_reg, c("cff_pers", "cff"), exact = TRUE) }) # Check full classes with recursion diff --git 
a/tests/testthat/test-as_cff_reference.R b/tests/testthat/test-as_cff_reference.R index 6806bf89..82c6eea1 100644 --- a/tests/testthat/test-as_cff_reference.R +++ b/tests/testthat/test-as_cff_reference.R @@ -33,7 +33,7 @@ test_that("Parsed several citations", { desc_path <- system.file("examples/DESCRIPTION_rgeos", package = "cffr") cit_path <- system.file("examples/CITATION_auto", package = "cffr") citobj <- cff_safe_read_citation(desc_path, cit_path) - expect_s3_class(citobj, c("cff_ref_list", "cff", "list"), exact = TRUE) + expect_s3_class(citobj, c("cff_ref_list", "cff"), exact = TRUE) expect_snapshot(citobj) expect_length(citobj, 3) diff --git a/tests/testthat/test-cff_create.R b/tests/testthat/test-cff_create.R index 883aca28..4f4d693d 100644 --- a/tests/testthat/test-cff_create.R +++ b/tests/testthat/test-cff_create.R @@ -120,11 +120,9 @@ test_that("Default roles on write", { # Same as tmp <- tempfile(fileext = ".cff") - expect_silent( - cf2 <- cff_write(p, - authors_roles = c("aut", "cre"), dependencies = FALSE, - outfile = tmp, verbose = FALSE, validate = FALSE - ) + cf2 <- cff_write(p, + authors_roles = c("aut", "cre"), dependencies = FALSE, + outfile = tmp, verbose = FALSE, validate = FALSE ) expect_identical(cf, cf2) diff --git a/tests/testthat/test-cff_read.R b/tests/testthat/test-cff_read.R index 475a83dc..b3ffea4a 100644 --- a/tests/testthat/test-cff_read.R +++ b/tests/testthat/test-cff_read.R @@ -12,7 +12,7 @@ test_that("cff_read citation.cff", { f <- system.file("examples/CITATION_complete.cff", package = "cffr") f1 <- cff_read(f) expect_true(cff_validate(f1, verbose = FALSE)) - expect_s3_class(f1, c("cff", "list"), exact = TRUE) + expect_s3_class(f1, "cff", exact = TRUE) # With the alias f2 <- cff_read_cff_citation(f) @@ -28,7 +28,7 @@ test_that("cff_read DESCRIPTION", { f1 <- cff_read(f, gh_keywords = FALSE) expect_true(cff_validate(f1, verbose = FALSE)) - expect_s3_class(f1, c("cff", "list"), exact = TRUE) + expect_s3_class(f1, "cff", exact = TRUE) # With the alias f2 <- cff_read_description(f, gh_keywords = FALSE) @@ -63,7 +63,7 @@ test_that("cff_read bib", { f <- system.file("REFERENCES.bib", package = "cffr") f1 <- cff_read(f) - expect_s3_class(f1, c("cff_ref_list", "cff", "list"), exact = TRUE) + expect_s3_class(f1, c("cff_ref_list", "cff"), exact = TRUE) expect_gt(length(f1), 1) # Specific @@ -75,7 +75,7 @@ test_that("cff_read bib", { f <- system.file("examples/example.bib", package = "cffr") f1_2 <- cff_read(f) - expect_s3_class(f1_2, c("cff_ref_list", "cff", "list"), exact = TRUE) + expect_s3_class(f1_2, c("cff_ref_list", "cff"), exact = TRUE) expect_length(f1_2, 2) d <- f1_2[[2]] @@ -100,7 +100,7 @@ test_that("cff_read citation messages", { f <- system.file("examples/CITATION_auto", package = "cffr") expect_message(s <- cff_read(f), "Trying with") - expect_s3_class(s, c("cff_ref_list", "cff", "list"), exact = TRUE) + expect_s3_class(s, c("cff_ref_list", "cff"), exact = TRUE) }) test_that("cff_read CITATION_basic", { @@ -109,7 +109,7 @@ test_that("cff_read CITATION_basic", { path <- system.file("examples/CITATION_basic", package = "cffr") parsed <- cff_read(path, my_meta) - expect_s3_class(parsed, c("cff_ref_list", "cff", "list"), exact = TRUE) + expect_s3_class(parsed, c("cff_ref_list", "cff"), exact = TRUE) expect_equal(length(parsed), 2) }) @@ -119,7 +119,7 @@ test_that("cff_read CITATION with no encoding", { my_meta <- desc_to_meta(desc_path) parsed <- cff_read_citation(cit_path, my_meta) - expect_s3_class(parsed, c("cff_ref_list", "cff", "list"), exact = 
TRUE) + expect_s3_class(parsed, c("cff_ref_list", "cff"), exact = TRUE) }) test_that("cff_read CITATION_auto", { @@ -148,7 +148,7 @@ test_that("cff_read_safe CITATION_basic", { cit_path <- system.file("examples/CITATION_basic", package = "cffr") parsed <- cff_safe_read_citation(desc_path, cit_path) - expect_s3_class(parsed, c("cff_ref_list", "cff", "list"), exact = TRUE) + expect_s3_class(parsed, c("cff_ref_list", "cff"), exact = TRUE) expect_equal(length(parsed), 2) }) @@ -158,7 +158,7 @@ test_that("cff_read_safe CITATION with no encoding", { parsed <- cff_safe_read_citation(desc_path, cit_path) - expect_s3_class(parsed, c("cff_ref_list", "cff", "list"), exact = TRUE) + expect_s3_class(parsed, c("cff_ref_list", "cff"), exact = TRUE) expect_equal(length(parsed), 2) }) @@ -168,7 +168,7 @@ test_that("cff_read_safe CITATION_auto", { cit_path <- system.file("examples/CITATION_auto", package = "cffr") parsed <- cff_safe_read_citation(desc_path, cit_path) - expect_s3_class(parsed, c("cff_ref_list", "cff", "list"), exact = TRUE) + expect_s3_class(parsed, c("cff_ref_list", "cff"), exact = TRUE) expect_equal(length(parsed), 3) }) @@ -178,7 +178,7 @@ test_that("cff_read_safe CITATION_rmarkdown", { parsed <- cff_safe_read_citation(desc_path, cit_path) - expect_s3_class(parsed, c("cff_ref_list", "cff", "list"), exact = TRUE) + expect_s3_class(parsed, c("cff_ref_list", "cff"), exact = TRUE) expect_equal(length(parsed), 3) }) diff --git a/tests/testthat/test-cff-methods.R b/tests/testthat/test-methods.R similarity index 88% rename from tests/testthat/test-cff-methods.R rename to tests/testthat/test-methods.R index 208222a7..3de6a711 100644 --- a/tests/testthat/test-cff-methods.R +++ b/tests/testthat/test-methods.R @@ -170,21 +170,21 @@ test_that("as.person method", { ) }) -test_that("as person with another cff", { +test_that("Errors on other as.person methods", { path <- system.file("examples/CITATION_complete.cff", package = "cffr") the_cff <- cff_read(path) - expect_s3_class(the_cff, "cff") - expect_identical(as.person(the_cff), person()) + expect_s3_class(the_cff, "cff", exact = TRUE) + expect_snapshot(as.person(the_cff), error = TRUE) # identifiers key <- the_cff$identifiers - expect_s3_class(key, "cff") - expect_identical(as.person(key), person()) + expect_s3_class(key, c("cff_ref_list", "cff"), exact = TRUE) + expect_snapshot(as.person(key), error = TRUE) # preferred key <- the_cff$`preferred-citation` - expect_s3_class(key, "cff") - expect_identical(as.person(key), person()) + expect_s3_class(key, c("cff_ref", "cff"), exact = TRUE) + expect_snapshot(as.person(key), error = TRUE) }) test_that("head and tail", { @@ -250,6 +250,28 @@ test_that("toBibtex", { froml <- toBibtex(cff_read_bib_text(string)) expect_equal(sum(names(froml) == "title"), 1) + + + # Persons + + sev_auth <- as_cff_person( + "{The Big Bopper} and Frank Sinatra and Dean Martin and Davis, Jr., Sammy" + ) + + expect_length(sev_auth, 4) + expect_s3_class(sev_auth, "cff_pers_list") + expect_snapshot(toBibtex(sev_auth)) + + + # Single person + single <- as_cff_person(person("A", "person", email = "a@b.d"))[[1]] + expect_s3_class(single, "cff_pers") + expect_snapshot(toBibtex(single)) + + # Single entity + single <- as_cff_person(person("{A and B co}", email = "a@b.d"))[[1]] + expect_s3_class(single, "cff_pers") + expect_snapshot(toBibtex(single)) }) From 93650432cc7133ae2f0ccb487ed8fe839ffe5223 Mon Sep 17 00:00:00 2001 From: dieghernan Date: Tue, 5 Mar 2024 15:22:19 +0000 Subject: [PATCH 05/13] Make as_bibentry a method --- NAMESPACE | 
7 + NEWS.md | 15 +- R/as_bibentry.R | 185 ++++++++++++------ R/methods.R | 8 +- R/utils.R | 44 +++++ README.md | 67 +------ codemeta.json | 4 +- data/cran_to_spdx.rda | Bin 916 -> 907 bytes inst/schemaorg.json | 2 +- man/as_bibentry.Rd | 30 ++- man/as_cff.Rd | 2 +- man/as_cff_person.Rd | 2 +- man/cff_class.Rd | 2 +- man/cff_write_misc.Rd | 9 +- man/roxygen/meta.R | 2 +- tests/testthat/_snaps/as_bibentry.md | 29 ++- tests/testthat/_snaps/mock-package.md | 13 ++ .../testthat/_snaps/xtra-check-bibtex-ruby.md | 4 +- tests/testthat/test-as_bibentry.R | 100 +++++++--- tests/testthat/test-mock-package.R | 4 + 20 files changed, 346 insertions(+), 183 deletions(-) diff --git a/NAMESPACE b/NAMESPACE index 13b848fd..42d209b7 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -9,6 +9,13 @@ S3method(as.person,cff_pers) S3method(as.person,cff_pers_list) S3method(as.person,cff_ref) S3method(as.person,cff_ref_list) +S3method(as_bibentry,"NULL") +S3method(as_bibentry,cff) +S3method(as_bibentry,cff_ref) +S3method(as_bibentry,cff_ref_list) +S3method(as_bibentry,character) +S3method(as_bibentry,default) +S3method(as_bibentry,list) S3method(as_cff,Bibtex) S3method(as_cff,bibentry) S3method(as_cff,default) diff --git a/NEWS.md b/NEWS.md index c42f1cf8..f8e31ec0 100644 --- a/NEWS.md +++ b/NEWS.md @@ -34,15 +34,14 @@ package. objects. We recommend using `as_cff_person()` since it can parse also string representing authors in BibTeX markup (`"{von Neumen}, James"`), that can't be captured properly via methods. +- New `as_bibentry()` method for a variety of classes (`character`, `list`, + `NULL` and classes defined by **cffr**). - The following **base** and **utils** methods supports now `cff` class: (TODO) - `as.data.frame.cff()`. - - `as.person.cff()`, that provides results **only** for CFF keys defined - as - [person](https://github.com/citation-file-format/citation-file-format/blob/main/schema-guide.md#definitionsperson) - or - [entity](https://github.com/citation-file-format/citation-file-format/blob/main/schema-guide.md#definitionsentity) - (e.g. `authors`, `contacts`, `editors`, `publisher,` etc.). + - `as.person()`, although **only** for `definitions.person` or + `definitions.entity` (e.g. `authors`, `contacts`, `editors`, + `publisher,` etc.). - `head.cff()`, `tail.cff()`. - `toBibtex.cff()`. @@ -57,8 +56,8 @@ would warn when used, providing advice on the replacement function. #### Deprecation -- `cff_to_bibtex()` and `cff_extract_to_bibtex()`: replaced by - `as_bibentry()`. +- `cff_to_bibtex()` and `cff_extract_to_bibtex()`: replaced by `as_bibentry()` + method. - `cff_from_bibtex()`: replaced by `cff_read_bib()` (for `*.bib` files) and `cff_read_bib_text()` (for character strings). - `write_bib()` and `write_citation()` : replaced by `cff_write_bib()` and diff --git a/R/as_bibentry.R b/R/as_bibentry.R index 4ebab176..c5b9e3b4 100644 --- a/R/as_bibentry.R +++ b/R/as_bibentry.R @@ -110,69 +110,147 @@ #' #' toBibtex(desc_file) #' } -as_bibentry <- function(x, - what = c("preferred", "references", "all")) { - what <- match.arg(what) - if (is.null(x)) { - return(NULL) +#' +as_bibentry <- function(x, ...) { + UseMethod("as_bibentry") +} + + +#' @export +#' @rdname as_bibentry +#' @order 2 +as_bibentry.default <- function(x, ...) { + dot_list <- list(...) + as_bibentry(dot_list) +} + +#' @export +#' @rdname as_bibentry +#' @order 3 +as_bibentry.character <- function(x, ..., + what = c("preferred", "references", "all")) { + # A named list + if (missing(x)) { + dot_list <- list(...) 
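+    # No explicit source: the dots are treated as bibentry() fields and
+    # handed to the list method, which calls utils::bibentry() on them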
+ return(as_bibentry(dot_list)) } - if (is_cff_file(x)) { - x <- cff_read_cff_citation(x) + + what <- match_cff_arg( + what, c("preferred", "references", "all"), + "what", environment() + ) + + + x <- x[1] + src_detect <- detect_x_source(x) + + if (src_detect == "dontknow") { + # nolint start + msg <- paste0('install.packages("', x, '")') + # nolint end + cli::cli_abort( + paste( + "Don't know how to extract a {.cls bibentry} from {.val {x}}.", + "If it is a package run {.run {msg}} first." + ) + ) } - if (is_cff(x)) { - obj <- x + if (src_detect == "package") { + x <- cff_create(x) } else { - obj <- cff_create(x) + x <- cff_read(x) } + as_bibentry(x, what = what) +} - # Guess case - cff_type <- guess_cff_part(obj) - - if (cff_type == "cff_full") { - # Try to generate preferred if not present - if (!("preferred-citation" %in% names(obj))) { - prefcit <- obj - prefcit$type <- "generic" - prefcit <- prefcit[names(prefcit) %in% cff_schema_definitions_refs()] - prefcit <- new_cff(prefcit) - # And add to the object - obj$`preferred-citation` <- prefcit - } - - # Select type to extract - obj_extract <- switch(what, - "preferred" = list(obj$`preferred-citation`), - "references" = obj$references, - c(list(obj$`preferred-citation`), obj$references) - ) - } else if (cff_type == "cff_ref") { - obj_extract <- list(obj) - } else { - obj_extract <- obj +#' @export +#' @rdname as_bibentry +#' @order 4 +as_bibentry.NULL <- function(x, ...) { + cff_create() +} + + +#' @export +#' @rdname as_bibentry +#' @order 5 +as_bibentry.list <- function(x, ...) { + ## Convert and catch errors ---- + bib <- try(do.call(bibentry, x), silent = TRUE) + + # If key missing + if (inherits(bib, "try-error")) { + message <- attributes(bib)$condition$message + cli::cli_alert_danger(paste("Can't convert to {.fn bibentry}: ")) + cli::cli_alert_info(message) + cli::cli_alert_warning("Returning empty {.cls bibentry}") + return(bibentry()) } - # Cleanup - obj_extract <- as.list(obj_extract) - obj_extract <- obj_extract[lengths(obj_extract) > 0] - if (length(obj_extract) == 0) { - return(NULL) + # Unlist easy to undo the do.call effect + bib <- bib[[1]] +} + + +#' @export +#' @rdname as_bibentry +#' @order 6 +as_bibentry.cff <- function(x, ..., + what = c("preferred", "references", "all")) { + what <- match_cff_arg( + what, c("preferred", "references", "all"), + "what", environment() + ) + + obj <- x + # Try to generate preferred if not present + if (!("preferred-citation" %in% names(obj))) { + prefcit <- obj + prefcit$type <- "generic" + prefcit <- prefcit[names(prefcit) %in% cff_schema_definitions_refs()] + prefcit <- new_cff(prefcit) + # And add to the object + obj$`preferred-citation` <- prefcit } - ref <- lapply(obj_extract, make_bibentry) - ref <- do.call(c, ref) + # Select type to extract + obj_extract <- switch(what, + "preferred" = list(obj$`preferred-citation`), + "references" = obj$references, + c(list(obj$`preferred-citation`), obj$references) + ) + + if (is.null(obj_extract)) { + return(bibentry()) + } + # Prepare for dispatching + objend <- as_cff(obj_extract) + + as_bibentry(objend) +} + +#' @export +#' @rdname as_bibentry +#' @order 7 +as_bibentry.cff_ref_list <- function(x, ...) { + ref <- lapply(x, function(y) { + # Reclass to dispatch method + as_bibentry(as_cff(y)) + }) + ref <- do.call(c, ref) return(ref) } -make_bibentry <- function(x) { - if (is.null(x)) { - return(NULL) - } +#' @export +#' @rdname as_bibentry +#' @order 8 +as_bibentry.cff_ref <- function(x, ...) 
{ # Relist to cff for dispatching methods on persons x <- as_cff(x) @@ -298,22 +376,9 @@ make_bibentry <- function(x) { sorted <- unique[unique %in% names(tobibentry)] tobibentry <- tobibentry[sorted] - ## Convert and catch errors ---- - bib <- try(do.call(bibentry, tobibentry), silent = TRUE) - - # If key missing - if (inherits(bib, "try-error")) { - message <- attributes(bib)$condition$message - cli::cli_alert_danger(paste("Can't convert to {.fn bibentry}: ")) - cli::cli_alert_info(message) - cli::cli_alert_warning("Returning {.val NULL}") - return(NULL) - } - - # Unlist easy to undo the do.call effect - bib <- bib[[1]] + ## Convert - return(bib) + as_bibentry(tobibentry) } # Utils in utils-bib.R diff --git a/R/methods.R b/R/methods.R index ca9fb142..618eba07 100644 --- a/R/methods.R +++ b/R/methods.R @@ -100,7 +100,7 @@ as.list.cff <- function(x, ...) { #' @rdname as_bibentry #' @name toBibtex.cff -#' @order 2 +#' @order 9 #' #' @description #' @@ -123,18 +123,18 @@ as.list.cff <- function(x, ...) { #' markup. toBibtex.cff <- function(object, ..., what = c("preferred", "references", "all")) { - toBibtex(as_bibentry(object, what), ...) + toBibtex(as_bibentry(object, what = what), ...) } #' @rdname as_bibentry -#' @order 3 +#' @order 10 #' @export toBibtex.cff_pers_list <- function(object, ...) { toBibtex(as.person(object), ...) } #' @rdname as_bibentry -#' @order 4 +#' @order 11 #' @export toBibtex.cff_pers <- function(object, ...) { toBibtex(as.person(object), ...) diff --git a/R/utils.R b/R/utils.R index 298165c1..b8b3b7a7 100644 --- a/R/utils.R +++ b/R/utils.R @@ -213,3 +213,47 @@ guess_cff_part <- function(x) { fin } + + +detect_x_source <- function(x) { + if (missing(x)) { + return("indev") + } + + x <- as.character(x)[1] + instpack <- as.character(installed.packages()[, "Package"]) + + if (x %in% instpack) { + return("package") + } + + + if (grepl("\\.cff$", x, ignore.case = TRUE)) { + return("cff_citation") + } + if (grepl("\\.bib$", x, ignore.case = TRUE)) { + return("bib") + } + if (grepl("citat", x, ignore.case = TRUE)) { + return("citation") + } + if (grepl("desc", x, ignore.case = TRUE)) { + return("description") + } + + return("dontknow") +} + +match_cff_arg <- function(arg, valid, for_msg, call = environment()) { + arg <- as.character(arg)[1] + valid <- as.character(valid) + + if (!arg %in% valid) { + cli::cli_abort( + "{.arg {for_msg}} should be {.val {valid}}, not {.val {arg}}.", + call = call + ) + } + + return(arg) +} diff --git a/README.md b/README.md index 954bed00..582a3f42 100644 --- a/README.md +++ b/README.md @@ -72,7 +72,7 @@ file and the `CITATION` file (if present) of your package. Note that **cffr** works best if your package pass `R CMD check/devtools::check()`. -As per 2024-03-04 there are at least 298 repos on GitHub using **cffr**. +As per 2024-03-05 there are at least 290 repos on GitHub using **cffr**. [Check them out here](https://github.com/search?q=cffr%20path%3A**%2FCITATION.cff&type=code). @@ -561,26 +561,6 @@ test <- cff_create("rmarkdown") - family-names: Chirico. 
given-names: Michael year: '2024' - - type: software - title: dygraphs - abstract: 'dygraphs: Interface to ''Dygraphs'' Interactive Time Series Charting - Library' - notes: Suggests - url: https://github.com/rstudio/dygraphs - repository: https://CRAN.R-project.org/package=dygraphs - authors: - - family-names: Vanderkam - given-names: Dan - website: http://dygraphs.com/ - - family-names: Allaire - given-names: JJ - - family-names: Owen - given-names: Jonathan - - family-names: Gromer - given-names: Daniel - - family-names: Thieurmel - given-names: Benoit - year: '2024' - type: software title: fs abstract: 'fs: Cross-Platform File System Operations Based on ''libuv''' @@ -597,26 +577,6 @@ test <- cff_create("rmarkdown") given-names: Gábor email: csardi.gabor@gmail.com year: '2024' - - type: software - title: rsconnect - abstract: 'rsconnect: Deploy Docs, Apps, and APIs to ''Posit Connect'', ''shinyapps.io'', - and ''RPubs''' - notes: Suggests - url: https://rstudio.github.io/rsconnect/ - repository: https://CRAN.R-project.org/package=rsconnect - authors: - - family-names: Atkins - given-names: Aron - email: aron@posit.co - - family-names: Allen - given-names: Toph - - family-names: Wickham - given-names: Hadley - - family-names: McPherson - given-names: Jonathan - - family-names: Allaire - given-names: JJ - year: '2024' - type: software title: downlit abstract: 'downlit: Syntax Highlighting and Automatic Linking' @@ -629,19 +589,6 @@ test <- cff_create("rmarkdown") email: hadley@posit.co year: '2024' version: '>= 0.4.0' - - type: software - title: katex - abstract: 'katex: Rendering Math to HTML, ''MathML'', or R-Documentation Format' - notes: Suggests - url: https://docs.ropensci.org/katex/ - repository: https://CRAN.R-project.org/package=katex - authors: - - family-names: Ooms - given-names: Jeroen - email: jeroen@berkeley.edu - orcid: https://orcid.org/0000-0002-4035-0289 - year: '2024' - version: '>= 1.4.0' - type: software title: sass abstract: 'sass: Syntactically Awesome Style Sheets (''Sass'')' @@ -752,18 +699,6 @@ test <- cff_create("rmarkdown") given-names: Davis email: davis@posit.co year: '2024' - - type: software - title: cleanrmd - abstract: 'cleanrmd: Clean Class-Less ''R Markdown'' HTML Documents' - notes: Suggests - url: https://pkg.garrickadenbuie.com/cleanrmd/ - repository: https://CRAN.R-project.org/package=cleanrmd - authors: - - family-names: Aden-Buie - given-names: Garrick - email: garrick@adenbuie.com - orcid: https://orcid.org/0000-0002-7111-0077 - year: '2024' - type: software title: withr abstract: 'withr: Run Code ''With'' Temporarily Modified Global State' diff --git a/codemeta.json b/codemeta.json index a771f6dd..f26f97d2 100644 --- a/codemeta.json +++ b/codemeta.json @@ -14,7 +14,7 @@ "name": "R", "url": "https://r-project.org" }, - "runtimePlatform": "R version 4.3.2 (2023-10-31 ucrt)", + "runtimePlatform": "R version 4.3.2 (2023-10-31)", "provider": { "@id": "https://cran.r-project.org", "@type": "Organization", @@ -200,7 +200,7 @@ }, "isPartOf": "https://ropensci.org", "keywords": ["attribution", "citation", "credit", "citation-files", "cff", "metadata", "r", "r-package", "citation-file-format", "rstats", "ropensci", "cran"], - "fileSize": "975.452KB", + "fileSize": "963.055KB", "citation": [ { "@type": "ScholarlyArticle", diff --git a/data/cran_to_spdx.rda b/data/cran_to_spdx.rda index 0bd1435cae708e762b962cb7a97ec9ae0c12fef9..33fae2994ac0070f09336d3d8c403ec099798a1f 100644 GIT binary patch literal 907 
zcmV;619bdCT4*^jL0KkKS>9zRc>o3)f6)K`|B!TNe+NIOUueH)-|#>H0ssI3&;$PH zMJcLQXdxn$LqY0#n*AnjWXMA%O-!8VroodWVq>8hVWZpyrJl8ess_0%@QOhK3*w7>x`OkO2~jFe$2d zXwawXnjVu)lzN&O4WcqVOllbqQxUPLFe>dz=&r?fZ9@1WKjajLMimfmJ2mUYPPPXx zj*Ybg1&%$?qNxPUhn5dz<$F-rN4=W12TeQiUlnt8<0b6Yj7qp-b#>kRV}V z`Fj%x=Ccapm4j`8K(v)wkqAVgQwwED3edK<)|qCdvuV!(PmL8wD1-^BC2K`h&j3QP zL0u36=!%xXc0(x;6=KLKJiY@Gylf7R8cZa*UN&lz^7{|Bd%yVmS_cO2<5+P%UUOvX zSz@tbnf*NMPJ>Nh!Uq@-^UcwV*1cylE@5sg3T+_5-MXcG?wWWD;2vA3o z&8+4_l-WR}LCwk=!sx_DBpAAMWvtJbNN+NfJb=(2mY@It literal 916 zcmV;F18e*riwFP!000002F+JXPuoBccH=-unm|gSm3lH&4Vna`W6qYoKWTU7l z6%~ibghjRFILLPBCXZ^{hliR@wmg?9cUwC}dynSrJ>vGR3u>%N zH7^9s2jmCN)MFU+jvqQf=!^lyHV7uMxN$r@(R46&?O7nkAL|Ucu8wsheU#`}%-}Kg zct}p6f|sD3J$Vg6?f4@&g<#mx_GkbE&SM;}8efM{{dWsb-+YHfc*I(skDzQMQuc9 z1!Aohx0C}Wz@61{kAy z&Yir|26bnZ!6Z%^)E>i>qx;N-Fqt-QvgfmsNZIvBwAm0ab2)67k|eIP;A`!G#>mpM z^iZ=B9+QksbB1dyET}lE|DM>xOF{e0ntpR!*DkoCa;-+&L=l6|gqH=3{IpT6Ecq3S zYeuoij3tXyK2yb3u8OVv0aVXC*O@pWx)Nr+Lh>6AcYHg*)N{XbpuvMrDCh&%#!Z-b zl8jZht*40o7ZBOVmSVVL106tdg;alC4R?J;u`wz#?3 zjfZj`(dQt-@SQK_UFh<3UL@mGl-u{nq~C0?X$Sm-txHN&hBcFxl`GI@LE+XjzF=3V z{08ZT_c^er!9v~w*h7PCW`0WWB0K*ZUKH0qz)3MkD`ZhnY^`QN@PAw(38+v}F}WMf z%vENY4z3n27R)i3_&a+GMMZe~UGnS+3epi+%Hr$8?&+{g{tFEUC+Ffm{5BSleFUss q*`d9A?PHSyz3NXt?~+F~qDOtlUw3zRKk(1b=<^FCJhi~74*&oy4!s@# diff --git a/inst/schemaorg.json b/inst/schemaorg.json index ce1c62ba..015b552c 100644 --- a/inst/schemaorg.json +++ b/inst/schemaorg.json @@ -26,6 +26,6 @@ "name": "Comprehensive R Archive Network (CRAN)", "url": "https://cran.r-project.org" }, - "runtimePlatform": "R version 4.3.2 (2023-10-31 ucrt)", + "runtimePlatform": "R version 4.3.2 (2023-10-31)", "version": "0.99.0.9000" } diff --git a/man/as_bibentry.Rd b/man/as_bibentry.Rd index cf2d8392..775823af 100644 --- a/man/as_bibentry.Rd +++ b/man/as_bibentry.Rd @@ -2,12 +2,33 @@ % Please edit documentation in R/as_bibentry.R, R/methods.R \name{as_bibentry} \alias{as_bibentry} +\alias{as_bibentry.default} +\alias{as_bibentry.character} +\alias{as_bibentry.NULL} +\alias{as_bibentry.list} +\alias{as_bibentry.cff} +\alias{as_bibentry.cff_ref_list} +\alias{as_bibentry.cff_ref} \alias{toBibtex.cff} \alias{toBibtex.cff_pers_list} \alias{toBibtex.cff_pers} \title{Create \code{bibentry} objects from several sources} \usage{ -as_bibentry(x, what = c("preferred", "references", "all")) +as_bibentry(x, ...) + +\method{as_bibentry}{default}(x, ...) + +\method{as_bibentry}{character}(x, ..., what = c("preferred", "references", "all")) + +\method{as_bibentry}{`NULL`}(x, ...) + +\method{as_bibentry}{list}(x, ...) + +\method{as_bibentry}{cff}(x, ..., what = c("preferred", "references", "all")) + +\method{as_bibentry}{cff_ref_list}(x, ...) + +\method{as_bibentry}{cff_ref}(x, ...) \method{toBibtex}{cff}(object, ..., what = c("preferred", "references", "all")) @@ -28,6 +49,8 @@ file on your in-development package. \item Path to a DESCRIPTION file (\code{"DESCRIPTION"}). }} +\item{...}{Arguments passed to \code{\link[utils:toLatex]{utils::toBibtex()}}.} + \item{what}{Fields to extract. The value could be: \itemize{ \item \code{preferred}: This would create a single entry with the main citation @@ -38,8 +61,6 @@ both the preferred citation info and the references. 
}} \item{object}{For \code{toBibtex.cff()} a \code{\link{cff}} object.} - -\item{...}{Arguments passed to \code{\link[utils:toLatex]{utils::toBibtex()}}.} } \value{ \code{as_bibentry()} returns a \code{bibentry} object (or a list of \code{bibentry} @@ -108,6 +129,7 @@ desc_file <- as_bibentry(path2) toBibtex(desc_file) } + } \references{ \itemize{ @@ -141,7 +163,7 @@ Other functions for converting between \strong{R} classes: \code{\link{as_cff}()}, \code{\link{as_cff_person}()} -Other S3 Methods for \code{cff}: +Other \strong{S3 Methods}: \code{\link{as_cff}()}, \code{\link{as_cff_person}()}, \code{\link{cff_class}} diff --git a/man/as_cff.Rd b/man/as_cff.Rd index a11027f6..79595e74 100644 --- a/man/as_cff.Rd +++ b/man/as_cff.Rd @@ -93,7 +93,7 @@ Other functions for converting between \strong{R} classes: \code{\link{as_bibentry}()}, \code{\link{as_cff_person}()} -Other S3 Methods for \code{cff}: +Other \strong{S3 Methods}: \code{\link{as_bibentry}()}, \code{\link{as_cff_person}()}, \code{\link{cff_class}} diff --git a/man/as_cff_person.Rd b/man/as_cff_person.Rd index ae9f13f8..5b2921a8 100644 --- a/man/as_cff_person.Rd +++ b/man/as_cff_person.Rd @@ -173,7 +173,7 @@ Other functions for converting between \strong{R} classes: \code{\link{as_bibentry}()}, \code{\link{as_cff}()} -Other S3 Methods for \code{cff}: +Other \strong{S3 Methods}: \code{\link{as_bibentry}()}, \code{\link{as_cff}()}, \code{\link{cff_class}} diff --git a/man/cff_class.Rd b/man/cff_class.Rd index 8d5ed828..39968c0e 100644 --- a/man/cff_class.Rd +++ b/man/cff_class.Rd @@ -180,7 +180,7 @@ as.person(the_cff$authors) \seealso{ \code{vignette("cffr", "cffr")} for a hands-on example. -Other S3 Methods for \code{cff}: +Other \strong{S3 Methods}: \code{\link{as_bibentry}()}, \code{\link{as_cff}()}, \code{\link{as_cff_person}()} diff --git a/man/cff_write_misc.Rd b/man/cff_write_misc.Rd index 68eccee6..d197ebd5 100644 --- a/man/cff_write_misc.Rd +++ b/man/cff_write_misc.Rd @@ -37,14 +37,7 @@ lines to be written.} \item{...}{ Arguments passed on to \code{\link[=as_bibentry]{as_bibentry}} \describe{ - \item{\code{what}}{Fields to extract. The value could be: -\itemize{ -\item \code{preferred}: This would create a single entry with the main citation -info of the package. -\item \code{references}: Extract all the entries on \code{references}. -\item \code{all}: A combination of the previous two options. This would extract -both the preferred citation info and the references. -}} + \item{\code{}}{} }} } \value{ diff --git a/man/roxygen/meta.R b/man/roxygen/meta.R index 54a0daf6..5db7f4db 100644 --- a/man/roxygen/meta.R +++ b/man/roxygen/meta.R @@ -8,6 +8,6 @@ list( schemas = "Other CFF schemas:", git = "Other Git/GitHub helpers:", deprecated = "Other deprecated functions:", - s3method = "Other S3 Methods for \\code{cff}:" + s3method = "Other \\strong{S3 Methods}:" ) ) diff --git a/tests/testthat/_snaps/as_bibentry.md b/tests/testthat/_snaps/as_bibentry.md index b0e779a9..80173bc5 100644 --- a/tests/testthat/_snaps/as_bibentry.md +++ b/tests/testthat/_snaps/as_bibentry.md @@ -541,6 +541,14 @@ author = {John Doe}, } +--- + + Code + as_bibentry(cff(), what = "anda") + Condition + Error in `as_bibentry()`: + ! `what` should be "preferred", "references", and "all", not "anda". + # From file Code @@ -579,6 +587,14 @@ institution = {Entity Project Team Conference entity}, } +--- + + Code + as_bibentry("anunkonwpackage") + Condition + Error in `as_bibentry()`: + ! Don't know how to extract a from "anunkonwpackage". 
If it is a package run `install.packages("anunkonwpackage")` first. + # Test anonymous Code @@ -671,5 +687,16 @@ Message x Can't convert to `bibentry()`: i A bibentry of bibtype 'Article' has to specify the fields: journal, year - ! Returning "NULL" + ! Returning empty + +# default + + Code + as_bibentry(a = 1) + Message + x Can't convert to `bibentry()`: + i argument "bibtype" is missing, with no default + ! Returning empty + Output + bibentry() diff --git a/tests/testthat/_snaps/mock-package.md b/tests/testthat/_snaps/mock-package.md index b562c1be..75458a3c 100644 --- a/tests/testthat/_snaps/mock-package.md +++ b/tests/testthat/_snaps/mock-package.md @@ -85,3 +85,16 @@ url = {https://ggplot2.tidyverse.org}, } +--- + + Code + toBibtex(a_bib) + Output + @Misc{basic, + title = {manyurls: A lot of urls}, + author = {Marc Basic}, + url = {https://test.github.io/package/}, + abstract = {This package has many urls. Specifically, 1 Bug Reports and 6 URLs. Expected is to have 1 repository-code, 1 url and 3 URLs, since there is 1 duplicate and 1 invalid url.}, + version = {0.1.6}, + } + diff --git a/tests/testthat/_snaps/xtra-check-bibtex-ruby.md b/tests/testthat/_snaps/xtra-check-bibtex-ruby.md index 408e81ee..0e6641d1 100644 --- a/tests/testthat/_snaps/xtra-check-bibtex-ruby.md +++ b/tests/testthat/_snaps/xtra-check-bibtex-ruby.md @@ -5,9 +5,9 @@ Message x Can't convert to `bibentry()`: i A bibentry of bibtype 'Book' has to specify the field: publisher - ! Returning "NULL" + ! Returning empty Output - NULL + bibentry() # preferred-citation-book diff --git a/tests/testthat/test-as_bibentry.R b/tests/testthat/test-as_bibentry.R index 7a95ee0b..107a1554 100644 --- a/tests/testthat/test-as_bibentry.R +++ b/tests/testthat/test-as_bibentry.R @@ -348,6 +348,9 @@ test_that("From plain cff with a citation", { test_that("From plain cff", { expect_silent(bib <- as_bibentry(cff())) expect_snapshot(toBibtex(bib)) + + + expect_snapshot(as_bibentry(cff(), what = "anda"), error = TRUE) }) test_that("From file", { @@ -357,11 +360,8 @@ test_that("From file", { bib <- as_bibentry(file) expect_snapshot(toBibtex(bib)) -}) -test_that("NULL", { - s <- NULL - expect_null(as_bibentry(s)) + expect_snapshot(as_bibentry("anunkonwpackage"), error = TRUE) }) @@ -453,12 +453,12 @@ test_that("From package", { expect_length(base, 1) - refs <- as_bibentry("rmarkdown", "references") + refs <- as_bibentry("rmarkdown", what = "references") expect_s3_class(refs, "bibentry") expect_gte(length(refs), 1) - all <- as_bibentry("rmarkdown", "all") + all <- as_bibentry("rmarkdown", what = "all") expect_s3_class(all, "bibentry") expect_length(all, length(base) + length(refs)) @@ -467,10 +467,13 @@ test_that("From package", { test_that("NULL references", { basic <- cff() - expect_null(as_bibentry(basic, "references")) + expect_identical( + as_bibentry(basic, what = "references"), + bibentry() + ) # Test all - expect_silent(l <- as_bibentry(basic, "all")) + expect_silent(l <- as_bibentry(basic, what = "all")) expect_length(l, 1) }) @@ -503,15 +506,13 @@ test_that("Corrupt entry", { x$year <- NULL x$journal <- NULL expect_snapshot(n <- as_bibentry(x)) - expect_null(n) + expect_identical(bibentry(), bibentry()) }) -test_that("Parser return nulls", { - expect_null(make_bibentry(NULL)) -}) -test_that("Fallback month", { - bib <- bibentry("Article", +test_that("default", { + bib_coerced <- as_bibentry( + bibtype = "Article", key = "knuth:1984", author = person("R Core Team"), title = "Literate Programming", @@ -525,17 +526,70 @@ 
test_that("Fallback month", { keywords = "Some, simple, keywords" ) - expect_identical(clean_str(bib[[1]]$month), "January") - x <- as_cff(bib) + direct <- bibentry( + bibtype = "Article", + key = "knuth:1984", + author = person("R Core Team"), + title = "Literate Programming", + journal = "The Computer Journal", + year = "1984", + # Optional + volume = "27", + number = 2, + pages = "97--111", + month = "January", + keywords = "Some, simple, keywords" + ) + expect_identical(bib_coerced, direct) + + with_number_first <- as_bibentry( + number = 2, + bibtype = "Article", + key = "knuth:1984", + author = person("R Core Team"), + title = "Literate Programming", + journal = "The Computer Journal", + year = "1984", + # Optional + volume = "27", + pages = "97--111", + month = "January", + keywords = "Some, simple, keywords" + ) - expect_identical(x[[1]]$month, "1") + direct <- bibentry( + number = 2, + bibtype = "Article", + key = "knuth:1984", + author = person("R Core Team"), + title = "Literate Programming", + journal = "The Computer Journal", + year = "1984", + # Optional + volume = "27", + pages = "97--111", + month = "January", + keywords = "Some, simple, keywords" + ) + + expect_identical(with_number_first, direct) - x[[1]]$month <- NULL - x[[1]]$`date-published` <- "2010-12-31" - bib2 <- as_bibentry(x) + with_number_first <- as_bibentry( + number = 2, + bibtype = "Article", + key = "knuth:1984", + author = person("R Core Team"), + title = "Literate Programming", + journal = "The Computer Journal", + year = "1984", + # Optional + volume = "27", + pages = "97--111", + month = "January", + keywords = "Some, simple, keywords" + ) - expect_identical(clean_str(bib2[[1]]$month), "dec") - x2 <- as_cff(bib2) - expect_identical(x2[[1]]$month, "12") + # No additional dots + expect_snapshot(as_bibentry(a = 1)) }) diff --git a/tests/testthat/test-mock-package.R b/tests/testthat/test-mock-package.R index 265f0ed7..b859fa5c 100644 --- a/tests/testthat/test-mock-package.R +++ b/tests/testthat/test-mock-package.R @@ -1,4 +1,5 @@ test_that("Test in mock package", { + skip_on_cran() current_dir <- getwd() name <- paste0("mock-pack", runif(1) * 10) @@ -15,6 +16,8 @@ test_that("Test in mock package", { to = "DESCRIPTION" ) + # Get bibentry + a_bib <- as_bibentry() # Create citation cit <- utils::readCitationFile( system.file("examples/CITATION_basic", @@ -75,4 +78,5 @@ test_that("Test in mock package", { expect_snapshot(cffobj) expect_snapshot(toBibtex(cit)) + expect_snapshot(toBibtex(a_bib)) }) From 0279a7603cd5a250524482ab46e25faed7c98291 Mon Sep 17 00:00:00 2001 From: Diego H Date: Tue, 5 Mar 2024 19:23:30 +0100 Subject: [PATCH 06/13] Fix tests --- tests/testthat/_snaps/as_bibentry.md | 4 +--- tests/testthat/_snaps/xtra-check-bibtex-ruby.md | 4 +--- tests/testthat/test-as_bibentry.R | 2 +- tests/testthat/test-xtra-check-bibtex-ruby.R | 2 +- 4 files changed, 4 insertions(+), 8 deletions(-) diff --git a/tests/testthat/_snaps/as_bibentry.md b/tests/testthat/_snaps/as_bibentry.md index 80173bc5..170c208f 100644 --- a/tests/testthat/_snaps/as_bibentry.md +++ b/tests/testthat/_snaps/as_bibentry.md @@ -692,11 +692,9 @@ # default Code - as_bibentry(a = 1) + s <- as_bibentry(a = 1) Message x Can't convert to `bibentry()`: i argument "bibtype" is missing, with no default ! 
Returning empty - Output - bibentry() diff --git a/tests/testthat/_snaps/xtra-check-bibtex-ruby.md b/tests/testthat/_snaps/xtra-check-bibtex-ruby.md index 0e6641d1..f5f9a204 100644 --- a/tests/testthat/_snaps/xtra-check-bibtex-ruby.md +++ b/tests/testthat/_snaps/xtra-check-bibtex-ruby.md @@ -1,13 +1,11 @@ # preferred-citation-book-missing Code - as_bibentry(x) + s <- as_bibentry(x) Message x Can't convert to `bibentry()`: i A bibentry of bibtype 'Book' has to specify the field: publisher ! Returning empty - Output - bibentry() # preferred-citation-book diff --git a/tests/testthat/test-as_bibentry.R b/tests/testthat/test-as_bibentry.R index 107a1554..3cd11b5f 100644 --- a/tests/testthat/test-as_bibentry.R +++ b/tests/testthat/test-as_bibentry.R @@ -591,5 +591,5 @@ test_that("default", { # No additional dots - expect_snapshot(as_bibentry(a = 1)) + expect_snapshot(s <- as_bibentry(a = 1)) }) diff --git a/tests/testthat/test-xtra-check-bibtex-ruby.R b/tests/testthat/test-xtra-check-bibtex-ruby.R index f49ceed3..2472708f 100644 --- a/tests/testthat/test-xtra-check-bibtex-ruby.R +++ b/tests/testthat/test-xtra-check-bibtex-ruby.R @@ -7,7 +7,7 @@ test_that("preferred-citation-book-missing", { package = "cffr" ) - expect_snapshot(as_bibentry(x)) + expect_snapshot(s <- as_bibentry(x)) }) test_that("preferred-citation-book", { From afedef6e70199704ef5a891e82f949a283eea912 Mon Sep 17 00:00:00 2001 From: Diego H Date: Tue, 5 Mar 2024 19:50:20 +0100 Subject: [PATCH 07/13] More coverage on test --- R/as_cff_reference.R | 40 +++++------------------ tests/testthat/_snaps/as_cff_reference.md | 17 ++++++++++ tests/testthat/test-as_cff_reference.R | 15 +++++++++ 3 files changed, 40 insertions(+), 32 deletions(-) diff --git a/R/as_cff_reference.R b/R/as_cff_reference.R index e13b0729..b38d0126 100644 --- a/R/as_cff_reference.R +++ b/R/as_cff_reference.R @@ -100,11 +100,9 @@ make_cff_reference <- function(bib) { # Last step---- # Initial order but starting with type, title, authors - final_order <- unique(c( - "type", "title", "authors", - init_ord, - names(parse_cit) - )) + final_order <- unique( + c("type", "title", "authors", init_ord, names(parse_cit)) + ) parse_cit <- parse_cit[final_order] @@ -225,10 +223,6 @@ parse_bibtex_fields <- function(parse_cit) { names(parse_cit) <- nm - # Remove all instances of keywords except the first one - index <- which(nm == "keywords") - if (length(index) > 1) parse_cit <- parse_cit[-index[-1]] - # Additionally, need to delete keywords if length is less than 2, # errors on validation if (length(parse_cit$keywords) < 2) { @@ -243,30 +237,12 @@ parse_bibtex_fields <- function(parse_cit) { - # Treat dates---- - datpub <- parse_cit$`date-published` - - - if (!is.null(datpub)) { - datepub <- as.Date(as.character(datpub), optional = TRUE) - if (is.na(datepub)) { - parse_cit$`date-published` <- NULL - } else { - parse_cit$`date-published` <- as.character(datepub) - } - } - - datacc <- parse_cit$`date-accessed` + # Treat additional dates ---- + dpub <- clean_str(parse_cit$`date-published`) + parse_cit$`date-published` <- clean_str(as.Date(dpub, optional = TRUE)) - - if (!is.null(datacc)) { - datacc <- as.Date(as.character(datacc), optional = TRUE) - if (is.na(datacc)) { - parse_cit$`date-accessed` <- NULL - } else { - parse_cit$`date-accessed` <- as.character(datacc) - } - } + datacc <- clean_str(parse_cit$`date-accessed`) + parse_cit$`date-accessed` <- clean_str(as.Date(datacc, optional = TRUE)) # Treat pages diff --git a/tests/testthat/_snaps/as_cff_reference.md 
b/tests/testthat/_snaps/as_cff_reference.md index 909d91e2..ae2bdf50 100644 --- a/tests/testthat/_snaps/as_cff_reference.md +++ b/tests/testthat/_snaps/as_cff_reference.md @@ -282,6 +282,23 @@ publisher: name: Random House +# Fallback date + + Code + init_cff + Output + - type: generic + title: Test + authors: + - family-names: Jean + given-names: Billy + date-published: '2050-01-12' + date-accessed: '2099-02-02' + publisher: + name: Random House + month: '1' + year: '2050' + # Article Code diff --git a/tests/testthat/test-as_cff_reference.R b/tests/testthat/test-as_cff_reference.R index 82c6eea1..a880c845 100644 --- a/tests/testthat/test-as_cff_reference.R +++ b/tests/testthat/test-as_cff_reference.R @@ -194,6 +194,21 @@ test_that("Test inputs", { expect_true(cff_validate(cffobj, verbose = FALSE)) }) +test_that("Fallback date", { + bib <- bibentry("Misc", + title = "Test", + author = "Billy Jean", + date = "2050-01-12", + urldate = "2099-02-02", + publisher = "Random House", + type = "RANDOM" + ) + + init_cff <- as_cff(bib) + + expect_snapshot(init_cff) +}) + # Parse citation from BibTeX ---- test_that("Article", { From 0b3fa30136e83da995672d0f361a8ed315a85036 Mon Sep 17 00:00:00 2001 From: Diego H Date: Wed, 6 Mar 2024 01:55:23 +0100 Subject: [PATCH 08/13] End as_cff_reference --- R/as_cff_reference.R | 215 +++++------ R/cff_read.R | 8 +- R/parse_citation.R | 210 ----------- R/utils-cff_ref.R | 429 ++++++++++++++++++++++ R/utils-schema.R | 48 +++ R/utils.R | 2 +- tests/testthat/_snaps/as_cff_reference.md | 7 + tests/testthat/test-as_cff_reference.R | 22 ++ 8 files changed, 620 insertions(+), 321 deletions(-) delete mode 100644 R/parse_citation.R create mode 100644 R/utils-cff_ref.R diff --git a/R/as_cff_reference.R b/R/as_cff_reference.R index b38d0126..45f77573 100644 --- a/R/as_cff_reference.R +++ b/R/as_cff_reference.R @@ -4,7 +4,10 @@ as_cff_reference <- function(x) { # Need always to be unnamed bibentry bib <- unname(x) - + if (any(duplicated(bib))) { + cli::cli_alert_info("Removing duplicate {.cls bibentry} objects.") + bib <- unique(bib) + } # Return always a list the_list <- lapply(bib, make_cff_reference) @@ -12,11 +15,11 @@ as_cff_reference <- function(x) { } make_cff_reference <- function(bib) { - # Parse BibTeX entry ---- - parse_cit <- parse_bibtex_entry(bib) + # Get BibTeX entry ---- + cit_list <- get_bibtex_entry(bib) ## If no title (case of some Misc) then return null - if (!("title" %in% names(parse_cit))) { + if (!("title" %in% names(cit_list))) { entry <- capture.output(print(bib, bibtex = FALSE)) entry <- as.character(entry) @@ -25,113 +28,113 @@ make_cff_reference <- function(bib) { return(NULL) } - # Parse BibTeX fields ---- - parsed_fields <- parse_bibtex_fields(parse_cit) + # Get BibTeX fields ---- + field_list <- get_bibtex_fields(cit_list) # VGAM: title is a vector - parsed_fields$title <- clean_str(parsed_fields$title) + field_list$title <- clean_str(field_list$title) ## Handle collection types ---- - parsed_fields <- add_bibtex_coltype(parsed_fields) + field_list <- add_bibtex_coltype(field_list) ## Add conference - parsed_fields <- add_conference(parsed_fields) + field_list <- add_conference(field_list) # Create BibTeX to CFF institution logic ---- - parsed_fields <- parse_bibtex_to_inst(parsed_fields) + field_list <- get_bibtex_inst(field_list) - # Parse persons ---- + # Coerce persons ---- # Special case: authors # Some keys does not strictly require authors, so we create one for cff # https://github.com/citation-file-format/citation-file-format/blob/main/ 
# (cont) schema-guide.md#how-to-deal-with-unknown-individual-authors - if (is.null(parsed_fields$authors)) { - parsed_fields$authors <- person(family = "anonymous") + if (is.null(field_list$authors)) { + field_list$authors <- person(family = "anonymous") } ## authors ---- - parse_all_authors <- as_cff_person(parsed_fields$authors) - parsed_fields$authors <- unique(parse_all_authors) + get_all_authors <- as_cff_person(field_list$authors) + field_list$authors <- unique(get_all_authors) ## other persons---- - parse_other_persons <- building_other_persons(parsed_fields) + get_other_persons <- get_bibtex_other_pers(field_list) # Keep order here, we would use it later - init_ord <- names(parsed_fields) + init_ord <- names(field_list) - parse_cit <- c( - parsed_fields[!names(parsed_fields) %in% names(parse_other_persons)], - parse_other_persons + cit_list <- c( + field_list[!names(field_list) %in% names(get_other_persons)], + get_other_persons ) # Building blocks---- # Fallback for year and month: use date-published ---- - parse_cit <- fallback_dates(parse_cit) + cit_list <- fallback_dates(cit_list) ## doi---- - bb_doi <- building_doi(parse_cit) - parse_cit$doi <- bb_doi$doi + bb_doi <- get_bibtex_doi(cit_list) + cit_list$doi <- bb_doi$doi ### identifiers ---- - if (!is.null(bb_doi$identifiers)) parse_cit$identifiers <- bb_doi$identifiers + if (!is.null(bb_doi$identifiers)) cit_list$identifiers <- bb_doi$identifiers ## url---- - bb_url <- building_url(parse_cit) - parse_cit$url <- bb_url$url + bb_url <- get_bibtex_url(cit_list) + cit_list$url <- bb_url$url ### final identifiers---- # Identifies (additional dois and urls) if (!is.null(bb_url$identifiers)) { - parse_cit$identifiers <- append( - parse_cit$identifiers, + cit_list$identifiers <- append( + cit_list$identifiers, bb_url$identifiers ) } ## Add thesis type ---- - parse_cit <- add_thesis(parse_cit) + cit_list <- add_thesis(cit_list) ## Handle location ---- - parse_cit <- add_address(parse_cit) + cit_list <- add_address(cit_list) # Last step---- # Initial order but starting with type, title, authors final_order <- unique( - c("type", "title", "authors", init_ord, names(parse_cit)) + c("type", "title", "authors", init_ord, names(cit_list)) ) - parse_cit <- parse_cit[final_order] + cit_list <- cit_list[final_order] # Remove non-valid names validnames <- cff_schema_definitions_refs() - parse_cit <- parse_cit[names(parse_cit) %in% validnames] + cit_list <- cit_list[names(cit_list) %in% validnames] - parse_cit <- drop_null(parse_cit) + cit_list <- drop_null(cit_list) - return(parse_cit) + return(cit_list) } #' Extract and map BibTeX entry #' @noRd -parse_bibtex_entry <- function(bib) { +get_bibtex_entry <- function(bib) { # Unclass and manage entry type # Extract type from BibTeX init_type <- attr(unclass(bib)[[1]], "bibtype") init_type <- clean_str(tolower(init_type)) - parse_cit <- drop_null(unclass(bib)[[1]]) + cit_list <- drop_null(unclass(bib)[[1]]) # Add fields - parse_cit$bibtex_entry <- init_type + cit_list$bibtex_entry <- init_type # Manage type from BibTeX and convert to CFF - # This overwrite the BibTeX type field. Not parsed by this function - parse_cit$type <- switch(init_type, + # This overwrite the BibTeX type field. 
Not treated by this function + cit_list$type <- switch(init_type, "article" = "article", "book" = "book", "booklet" = "pamphlet", @@ -151,22 +154,22 @@ parse_bibtex_entry <- function(bib) { # Check if it an inbook with booktitle (BibLaTeX style) - if (all(init_type == "inbook", "booktitle" %in% names(parse_cit))) { + if (all(init_type == "inbook", "booktitle" %in% names(cit_list))) { # Make it incollection - parse_cit$bibtex_entry <- "incollection" - parse_cit$type <- "generic" + cit_list$bibtex_entry <- "incollection" + cit_list$type <- "generic" } - return(parse_cit) + return(cit_list) } #' Adapt names from R citation()/BibTeX to cff format #' @noRd -parse_bibtex_fields <- function(parse_cit) { +get_bibtex_fields <- function(cit_list) { # to lowercase - names(parse_cit) <- tolower(names(parse_cit)) - nm <- names(parse_cit) + names(cit_list) <- tolower(names(cit_list)) + nm <- names(cit_list) # Standard BibTeX fields: # address annote author booktitle chapter crossref edition editor # howpublished institution journal key month note number organization pages @@ -193,7 +196,7 @@ parse_bibtex_fields <- function(parse_cit) { nm[nm == "address"] <- "location" nm[nm == "pages"] <- "bibtex_pages" # This would be removed later - # Parse some fields from BibLaTeX + # Get some fields from BibLaTeX nm[nm == "date"] <- "date-published" nm[nm == "file"] <- "filename" nm[nm == "issuetitle"] <- "issue-title" @@ -207,10 +210,10 @@ parse_bibtex_fields <- function(parse_cit) { # Keywords may be duplicated, unify if ("keywords" %in% nm) { - kwords <- unlist(parse_cit["keywords" == nm]) + kwords <- unlist(cit_list["keywords" == nm]) kwords <- clean_str(paste(kwords, collapse = ", ")) kwords <- trimws(unique(unlist(strsplit(kwords, ",|;")))) - parse_cit$keywords <- unique(kwords) + cit_list$keywords <- unique(kwords) } # Not mapped: @@ -221,48 +224,48 @@ parse_bibtex_fields <- function(parse_cit) { # key is a special field, treated apart # Fields ignored: annote, crossref - names(parse_cit) <- nm + names(cit_list) <- nm # Additionally, need to delete keywords if length is less than 2, # errors on validation - if (length(parse_cit$keywords) < 2) { - parse_cit$keywords <- NULL + if (length(cit_list$keywords) < 2) { + cit_list$keywords <- NULL } # Treat location ---- - loc <- parse_cit$location + loc <- cit_list$location - if (!is.null(loc)) parse_cit$location <- loc + if (!is.null(loc)) cit_list$location <- loc # Treat additional dates ---- - dpub <- clean_str(parse_cit$`date-published`) - parse_cit$`date-published` <- clean_str(as.Date(dpub, optional = TRUE)) + dpub <- clean_str(cit_list$`date-published`) + cit_list$`date-published` <- clean_str(as.Date(dpub, optional = TRUE)) - datacc <- clean_str(parse_cit$`date-accessed`) - parse_cit$`date-accessed` <- clean_str(as.Date(datacc, optional = TRUE)) + datacc <- clean_str(cit_list$`date-accessed`) + cit_list$`date-accessed` <- clean_str(as.Date(datacc, optional = TRUE)) # Treat pages - pages <- parse_cit$bibtex_pages + pages <- cit_list$bibtex_pages if (!is.null(pages)) { spl <- unlist(strsplit(pages, "--")) - parse_cit$start <- spl[1] + cit_list$start <- spl[1] - if (length(spl) > 1) parse_cit$end <- paste(spl[-1], collapse = "--") + if (length(spl) > 1) cit_list$end <- paste(spl[-1], collapse = "--") } - return(parse_cit) + return(cit_list) } #' Modify mapping of some org. 
fields on BibTeX to CFF #' @noRd -parse_bibtex_to_inst <- function(parsed_fields) { +get_bibtex_inst <- function(field_list) { # Initial values - bibtex_entry <- parsed_fields$bibtex_entry + bibtex_entry <- field_list$bibtex_entry to_replace <- switch(bibtex_entry, "mastersthesis" = "school", "phdthesis" = "school", @@ -274,29 +277,29 @@ parse_bibtex_to_inst <- function(parsed_fields) { ) if (to_replace == "institution") { - return(parsed_fields) + return(field_list) } # Rest of cases remove bibtex institution and rename - nms <- names(parsed_fields) + nms <- names(field_list) - parsed_fields <- parsed_fields["institution" != nms] + field_list <- field_list["institution" != nms] # Rename - nms2 <- names(parsed_fields) + nms2 <- names(field_list) nms2[nms2 == to_replace] <- "institution" - names(parsed_fields) <- nms2 + names(field_list) <- nms2 - parsed_fields + field_list } -add_conference <- function(parsed_fields) { - bibtex_entry <- parsed_fields$bibtex_entry +add_conference <- function(field_list) { + bibtex_entry <- field_list$bibtex_entry if (bibtex_entry %in% c("conference", "inproceedings", "proceedings")) { - parsed_fields$conference <- parsed_fields$`collection-title` + field_list$conference <- field_list$`collection-title` } - return(parsed_fields) + return(field_list) } @@ -304,25 +307,25 @@ add_conference <- function(parsed_fields) { #' Adapt cff keys to bibtex entries #' @noRd -add_thesis <- function(parse_cit) { - bibtex_entry <- parse_cit$bibtex_entry +add_thesis <- function(cit_list) { + bibtex_entry <- cit_list$bibtex_entry if (!bibtex_entry %in% c("phdthesis", "mastersthesis")) { - return(parse_cit) + return(cit_list) } - parse_cit$`thesis-type` <- switch(bibtex_entry, + cit_list$`thesis-type` <- switch(bibtex_entry, phdthesis = "PhD Thesis", "Master's Thesis" ) - parse_cit + cit_list } -add_address <- function(parse_cit) { - loc <- parse_cit$location$name +add_address <- function(cit_list) { + loc <- cit_list$location$name # If available if (is.null(loc)) { - return(parse_cit) + return(cit_list) } # At this point is in location, see to move @@ -333,66 +336,66 @@ add_address <- function(parse_cit) { # 3. 
To publisher # Otherwise leave on location - nms <- names(parse_cit) + nms <- names(cit_list) has_conf <- "conference" %in% nms has_inst <- "institution" %in% nms has_publish <- "publisher" %in% nms if (!any(has_conf, has_inst, has_publish)) { - return(parse_cit) + return(cit_list) } if (has_conf) { - parse_cit$conference$address <- loc - parse_cit$location <- NULL + cit_list$conference$address <- loc + cit_list$location <- NULL } else if (has_inst) { - parse_cit$institution$address <- loc - parse_cit$location <- NULL + cit_list$institution$address <- loc + cit_list$location <- NULL } else { - parse_cit$publisher$address <- loc - parse_cit$location <- NULL + cit_list$publisher$address <- loc + cit_list$location <- NULL } - return(parse_cit) + return(cit_list) } -add_bibtex_coltype <- function(parsed_fields) { +add_bibtex_coltype <- function(field_list) { # Add collection-type if applicable and rearrange fields - nms <- names(parsed_fields) + nms <- names(field_list) if (!"collection-title" %in% nms) { - return(parsed_fields) + return(field_list) } # Made collection-type if we create collection-title - bibtex_type <- parsed_fields$bibtex_entry + bibtex_type <- field_list$bibtex_entry # Remove `in` at init: inbook, incollection affected coltype <- clean_str(gsub("^in", "", bibtex_type)) - parsed_fields$`collection-type` <- coltype + field_list$`collection-type` <- coltype # Rearrange to make both collection keys together nm_first <- nms[seq(1, match("collection-title", nms))] nms_end <- unique(c(nm_first, "collection-type", nms)) - parsed_fields <- parsed_fields[nms_end] + field_list <- field_list[nms_end] - return(parsed_fields) + return(field_list) } -fallback_dates <- function(parse_cit) { +fallback_dates <- function(cit_list) { # Fallback for year and month: use date-published - if (is.null(parse_cit$month) && !is.null(parse_cit$`date-published`)) { - parse_cit$month <- format(as.Date(parse_cit$`date-published`), "%m") + if (is.null(cit_list$month) && !is.null(cit_list$`date-published`)) { + cit_list$month <- format(as.Date(cit_list$`date-published`), "%m") } - if (is.null(parse_cit$year) && !is.null(parse_cit$`date-published`)) { - parse_cit$year <- format(as.Date(parse_cit$`date-published`), "%Y") + if (is.null(cit_list$year) && !is.null(cit_list$`date-published`)) { + cit_list$year <- format(as.Date(cit_list$`date-published`), "%Y") } ## month ---- - parse_cit$month <- building_month(parse_cit) + cit_list$month <- get_bibtex_month(cit_list) - return(parse_cit) + return(cit_list) } diff --git a/R/cff_read.R b/R/cff_read.R index 202c0572..e61e34b2 100644 --- a/R/cff_read.R +++ b/R/cff_read.R @@ -179,7 +179,7 @@ cff_read_description <- function(path, cff_version = "1.2.0", ) - list_fields <- list( + field_list <- list( "cff-version" = cff_version, message = msg, type = "software", @@ -198,11 +198,11 @@ cff_read_description <- function(path, cff_version = "1.2.0", ) if (gh_keywords) { - ghtopics <- parse_ghtopics(list_fields) - list_fields$keywords <- unique(c(list_fields$keywords, ghtopics)) + ghtopics <- parse_ghtopics(field_list) + field_list$keywords <- unique(c(field_list$keywords, ghtopics)) } - new_cff(list_fields) + new_cff(field_list) } #' @export diff --git a/R/parse_citation.R b/R/parse_citation.R deleted file mode 100644 index 802e8014..00000000 --- a/R/parse_citation.R +++ /dev/null @@ -1,210 +0,0 @@ -## Building blocks ---- - -#' BB for doi -#' @noRd -building_doi <- function(parse_cit) { - dois <- unlist(parse_cit[names(parse_cit) == "doi"]) - - dois <- unlist(lapply(dois, 
function(x) { - x <- gsub("^https://doi.org/", "", x) - x <- clean_str(x) - })) - - - dois <- unique(as.character(dois)) - - - # The first doi goes to doi key - - doi <- unlist(dois[1]) - - # The rest goes to identifies - identifiers <- lapply(dois[-1], function(x) { - list( - type = "doi", - value = clean_str(x) - ) - }) - if (length(identifiers) == 0) identifiers <- NULL - doi_list <- list( - doi = clean_str(doi), - identifiers = identifiers - ) - - return(doi_list) -} - -#' BB for month -#' @noRd -building_month <- function(parse_cit) { - mnt <- parse_cit$month - - if (is.null(mnt) || is.na(mnt)) { - return(NULL) - } - - # Guess if a valid integer is provided and output - mnt_num <- tryCatch(as.numeric(mnt), - warning = function(e) { - return(FALSE) - } - ) - - if (is.numeric(mnt_num) && mnt_num > 0 && mnt_num <= 12) { - res <- clean_str(mnt_num) - return(res) - } - - # else transform - # Get month, everything in lowercase - month <- clean_str(tolower(mnt)) - - # Index on abbreviation - res <- clean_str(which(tolower(month.abb) == month)) - if (!is.null(res)) { - return(res) - } - # Index on full - - res <- clean_str(which(tolower(month.name) == month)) - res -} - -#' BB for URL -#' @noRd -building_url <- function(parse_cit) { - ## Parse url: see bug with cff_create("rgeos") - if (is.character(parse_cit$url)) { - allurls <- as.character(parse_cit[names(parse_cit) == "url"]) - allurls <- unlist(strsplit(allurls, " |,|\\n")) - } else { - allurls <- parse_cit$url - } - - allurls <- allurls[is_url(allurls)] - # The first url goes to url key - - url <- unlist(allurls[1]) - - # The rest goes to identifies - identifiers <- lapply(allurls[-1], function(x) { - list( - type = "url", - value = clean_str(x) - ) - }) - - if (length(identifiers) == 0) identifiers <- NULL - - url_list <- list( - url = clean_str(url), - identifiers = identifiers - ) - - return(url_list) -} - -#' BB for other persons -#' @noRd -building_other_persons <- function(parsed_fields) { - others <- drop_null(parsed_fields[other_persons()]) - - # If any is person type (example, editors) then paste and collapse - - others <- lapply(others, function(x) { - if (inherits(x, "person")) { - x <- paste(x, collapse = " and ") - } else { - return(x) - } - }) - - - - # Select subsets - all_pers <- other_persons() - toent <- other_persons_entity() - toent_pers <- entity_person() - - toauto_end <- all_pers[!all_pers %in% c(toent, toent_pers)] - toent_end <- toent[!toent %in% toent_pers] - - # Parse as entity - toentity <- others[names(others) %in% toent_end] - toentity <- lapply(toentity, function(x) { - list(name = clean_str(x)) - }) - - # As persons or entities using bibtex - toentity_pers <- others[names(others) %in% toent_pers] - toentity_pers <- lapply(toentity_pers, function(x) { - bibtex <- paste(x, collapse = " and ") - # Unname - names(bibtex) <- NULL - - - end <- as_cff_person(bibtex) - - # If has names then it should be moved to a lower level on a list - if (!is.null(names(end))) end <- list(end) - - return(end) - }) - - - toperson <- others[names(others) %in% toauto_end] - toperson <- lapply(toperson, as_cff_person) - - - # Bind and reorder - parsedothers <- c(toentity, toperson, toentity_pers) - parsedothers <- parsedothers[names(others)] - - return(parsedothers) -} - - -#' Vector other persons -#' @noRd -other_persons <- function() { - pers_ent <- c( - "contact", - "editors", - "editors-series", - "recipients", - "senders", - "translators" - ) - - pers_ent <- sort(unique(c( - pers_ent, - other_persons_entity(), - 
entity_person() - ))) - - return(pers_ent) -} - -#' Vector other persons to be parsed as entities -#' @noRd -other_persons_entity <- function() { - entities <- c( - "conference", - "database-provider", - "institution", - "location", - "publisher" - ) - - entities -} - -#' This may be entities or persons -#' @noRd -entity_person <- function() { - forced <- c( - "editors", - "editors-series" - ) - forced -} diff --git a/R/utils-cff_ref.R b/R/utils-cff_ref.R new file mode 100644 index 00000000..baab34ca --- /dev/null +++ b/R/utils-cff_ref.R @@ -0,0 +1,429 @@ +# Utils for as_cff_reference + +#' Extract and map BibTeX entry +#' @noRd +get_bibtex_entry <- function(bib) { + # Unclass and manage entry type + # Extract type from BibTeX + init_type <- attr(unclass(bib)[[1]], "bibtype") + init_type <- clean_str(tolower(init_type)) + + + cit_list <- drop_null(unclass(bib)[[1]]) + + # Add fields + cit_list$bibtex_entry <- init_type + + # Manage type from BibTeX and convert to CFF + # This overwrite the BibTeX type field. Not treated by this function + cit_list$type <- switch(init_type, + "article" = "article", + "book" = "book", + "booklet" = "pamphlet", + "conference" = "conference-paper", + "inbook" = "book", + # "incollection" = , + "inproceedings" = "conference-paper", + "manual" = "manual", + "mastersthesis" = "thesis", + "misc" = "generic", + "phdthesis" = "thesis", + "proceedings" = "proceedings", + "techreport" = "report", + "unpublished" = "unpublished", + "generic" + ) + + + # Check if it an inbook with booktitle (BibLaTeX style) + if (all(init_type == "inbook", "booktitle" %in% names(cit_list))) { + # Make it incollection + cit_list$bibtex_entry <- "incollection" + cit_list$type <- "generic" + } + + + return(cit_list) +} + +#' Adapt names from R citation()/BibTeX to cff format +#' @noRd +get_bibtex_fields <- function(cit_list) { + # to lowercase + names(cit_list) <- tolower(names(cit_list)) + nm <- names(cit_list) + # Standard BibTeX fields: + # address annote author booktitle chapter crossref edition editor + # howpublished institution journal key month note number organization pages + # publisher school series title type year + + # No mapping needed (direct mapping) + # edition journal month publisher title volume year + + # Mapped: + # author booktitle series chapter editor howpublished note number + + nm[nm == "author"] <- "authors" + # Make collection title + # booktitle takes precedence over series + nm[nm == "booktitle"] <- "collection-title" + if (!"collection-title" %in% nm) { + nm[nm == "series"] <- "collection-title" + } + nm[nm == "chapter"] <- "section" + nm[nm == "editor"] <- "editors" + nm[nm == "howpublished"] <- "medium" + nm[nm == "note"] <- "notes" + nm[nm == "number"] <- "issue" + nm[nm == "address"] <- "location" + nm[nm == "pages"] <- "bibtex_pages" # This would be removed later + + # Get some fields from BibLaTeX + nm[nm == "date"] <- "date-published" + nm[nm == "file"] <- "filename" + nm[nm == "issuetitle"] <- "issue-title" + nm[nm == "translator"] <- "translators" + nm[nm == "urldate"] <- "date-accessed" + nm[nm == "pagetotal"] <- "pages" + + # Other BibLaTeX fields that does not require any mapping + # abstract, doi, isbn, issn, url, version + + + # Keywords may be duplicated, unify + if ("keywords" %in% nm) { + kwords <- unlist(cit_list["keywords" == nm]) + kwords <- clean_str(paste(kwords, collapse = ", ")) + kwords <- trimws(unique(unlist(strsplit(kwords, ",|;")))) + cit_list$keywords <- unique(kwords) + } + + # Not mapped: + # annote crossref key organization 
series type + # + # Fields address, organization, series and type are treated on + # main function + # key is a special field, treated apart + # Fields ignored: annote, crossref + + names(cit_list) <- nm + + # Additionally, need to delete keywords if length is less than 2, + # errors on validation + if (length(cit_list$keywords) < 2) { + cit_list$keywords <- NULL + } + + # Treat location ---- + + loc <- cit_list$location + + if (!is.null(loc)) cit_list$location <- loc + + + + # Treat additional dates ---- + dpub <- clean_str(cit_list$`date-published`) + cit_list$`date-published` <- clean_str(as.Date(dpub, optional = TRUE)) + + datacc <- clean_str(cit_list$`date-accessed`) + cit_list$`date-accessed` <- clean_str(as.Date(datacc, optional = TRUE)) + + # Treat pages + + pages <- cit_list$bibtex_pages + if (!is.null(pages)) { + spl <- unlist(strsplit(pages, "--")) + + cit_list$start <- spl[1] + + if (length(spl) > 1) cit_list$end <- paste(spl[-1], collapse = "--") + } + + return(cit_list) +} + +#' Modify mapping of some org. fields on BibTeX to CFF +#' @noRd +get_bibtex_inst <- function(field_list) { + # Initial values + bibtex_entry <- field_list$bibtex_entry + to_replace <- switch(bibtex_entry, + "mastersthesis" = "school", + "phdthesis" = "school", + "conference" = "organization", + "inproceedings" = "organization", + "manual" = "organization", + "proceedings" = "organization", + "institution" + ) + + if (to_replace == "institution") { + return(field_list) + } + + # Rest of cases remove bibtex institution and rename + nms <- names(field_list) + + field_list <- field_list["institution" != nms] + + # Rename + nms2 <- names(field_list) + nms2[nms2 == to_replace] <- "institution" + names(field_list) <- nms2 + + field_list +} + +add_conference <- function(field_list) { + bibtex_entry <- field_list$bibtex_entry + + if (bibtex_entry %in% c("conference", "inproceedings", "proceedings")) { + field_list$conference <- field_list$`collection-title` + } + return(field_list) +} + + + + +#' Adapt cff keys to bibtex entries +#' @noRd +add_thesis <- function(cit_list) { + bibtex_entry <- cit_list$bibtex_entry + if (!bibtex_entry %in% c("phdthesis", "mastersthesis")) { + return(cit_list) + } + + cit_list$`thesis-type` <- switch(bibtex_entry, + phdthesis = "PhD Thesis", + "Master's Thesis" + ) + + cit_list +} + +add_address <- function(cit_list) { + loc <- cit_list$location$name + # If available + if (is.null(loc)) { + return(cit_list) + } + + # At this point is in location, see to move + + # Logic order. + # 1. To conference + # 2. To institution + # 3. 
To publisher + # Otherwise leave on location + + nms <- names(cit_list) + has_conf <- "conference" %in% nms + has_inst <- "institution" %in% nms + has_publish <- "publisher" %in% nms + + if (!any(has_conf, has_inst, has_publish)) { + return(cit_list) + } + + if (has_conf) { + cit_list$conference$address <- loc + cit_list$location <- NULL + } else if (has_inst) { + cit_list$institution$address <- loc + cit_list$location <- NULL + } else { + cit_list$publisher$address <- loc + cit_list$location <- NULL + } + + return(cit_list) +} + +add_bibtex_coltype <- function(field_list) { + # Add collection-type if applicable and rearrange fields + nms <- names(field_list) + + if (!"collection-title" %in% nms) { + return(field_list) + } + + # Made collection-type if we create collection-title + bibtex_type <- field_list$bibtex_entry + + # Remove `in` at init: inbook, incollection affected + coltype <- clean_str(gsub("^in", "", bibtex_type)) + field_list$`collection-type` <- coltype + + # Rearrange to make both collection keys together + nm_first <- nms[seq(1, match("collection-title", nms))] + + nms_end <- unique(c(nm_first, "collection-type", nms)) + + field_list <- field_list[nms_end] + + return(field_list) +} + +fallback_dates <- function(cit_list) { + # Fallback for year and month: use date-published + if (is.null(cit_list$month) && !is.null(cit_list$`date-published`)) { + cit_list$month <- format(as.Date(cit_list$`date-published`), "%m") + } + + if (is.null(cit_list$year) && !is.null(cit_list$`date-published`)) { + cit_list$year <- format(as.Date(cit_list$`date-published`), "%Y") + } + + ## month ---- + cit_list$month <- get_bibtex_month(cit_list) + + return(cit_list) +} + +#' BB for doi +#' @noRd +get_bibtex_doi <- function(cit_list) { + dois <- unlist(cit_list[names(cit_list) == "doi"]) + + dois <- unlist(lapply(dois, function(x) { + x <- gsub("^https://doi.org/", "", x) + x <- clean_str(x) + })) + + + dois <- unique(as.character(dois)) + + + # The first doi goes to doi key + doi <- unlist(dois[1]) + + # The rest goes to identifies + identifiers <- lapply(dois[-1], function(x) { + list( + type = "doi", + value = clean_str(x) + ) + }) + if (length(identifiers) == 0) identifiers <- NULL + doi_list <- list( + doi = clean_str(doi), + identifiers = identifiers + ) + return(doi_list) +} + +#' BB for month +#' @noRd +get_bibtex_month <- function(cit_list) { + mnt <- clean_str(cit_list$month) + + if (is.null(mnt)) { + return(NULL) + } + + # If number + if (grepl("^\\d+$", mnt)) { + # Guess if a valid integer is provided and output + mnt_num <- as.numeric(mnt) + mnt_num <- mnt_num[mnt_num %in% seq(1, 12)] + return(clean_str(mnt_num)) + } + + # else transform + # Get month, everything in lowercase + month <- clean_str(tolower(mnt)) + month <- substr(month, 1, 3) + + # Index on abbreviation + low_month <- tolower(month.abb) + res <- seq(1, 12)[month == low_month] + clean_str(res[1]) +} + +#' BB for URL +#' @noRd +get_bibtex_url <- function(cit_list) { + ## Get url: see bug with cff_create("rgeos") + if (is.character(cit_list$url)) { + allurls <- as.character(cit_list[names(cit_list) == "url"]) + allurls <- unlist(strsplit(allurls, " |,|\\n")) + } else { + allurls <- cit_list$url + } + + allurls <- allurls[is_url(allurls)] + # The first url goes to url key + + url <- unlist(allurls[1]) + + # The rest goes to identifies + identifiers <- lapply(allurls[-1], function(x) { + list( + type = "url", + value = clean_str(x) + ) + }) + + if (length(identifiers) == 0) identifiers <- NULL + + url_list <- list( + url = 
clean_str(url), + identifiers = identifiers + ) + + return(url_list) +} + +#' BB for other persons +#' @noRd +get_bibtex_other_pers <- function(field_list) { + others <- drop_null(field_list[other_persons()]) + + # If any is person type (example, editors) then paste and collapse + + others <- lapply(others, function(x) { + if (inherits(x, "person")) { + x <- paste(x, collapse = " and ") + } else { + return(x) + } + }) + + + + # Select subsets + all_pers <- other_persons() + toent <- other_persons_entity() + toent_pers <- entity_person() + + toauto_end <- all_pers[!all_pers %in% c(toent, toent_pers)] + toent_end <- toent[!toent %in% toent_pers] + + # Entity + toentity <- others[names(others) %in% toent_end] + toentity <- lapply(toentity, function(x) { + list(name = clean_str(x)) + }) + + # As persons or entities using bibtex + toentity_pers <- others[names(others) %in% toent_pers] + toentity_pers <- lapply(toentity_pers, function(x) { + bibtex <- paste(x, collapse = " and ") + end <- as_cff_person(bibtex) + + return(end) + }) + + + toperson <- others[names(others) %in% toauto_end] + toperson <- lapply(toperson, as_cff_person) + + + # Bind and reorder + other_list <- c(toentity, toperson, toentity_pers) + other_list <- other_list[names(others)] + + return(other_list) +} diff --git a/R/utils-schema.R b/R/utils-schema.R index 88c3c3c9..29c7e06d 100644 --- a/R/utils-schema.R +++ b/R/utils-schema.R @@ -254,3 +254,51 @@ cff_schema_definitions_refs <- function() { ) definitions_reference } + + +# Helper lists and vectors ---- + +#' Vector other persons +#' @noRd +other_persons <- function() { + pers_ent <- c( + "contact", + "editors", + "editors-series", + "recipients", + "senders", + "translators" + ) + + pers_ent <- sort(unique(c( + pers_ent, + other_persons_entity(), + entity_person() + ))) + + return(pers_ent) +} + +#' Vector other persons to be coerced as entities +#' @noRd +other_persons_entity <- function() { + entities <- c( + "conference", + "database-provider", + "institution", + "location", + "publisher" + ) + + entities +} + +#' This may be entities or persons +#' @noRd +entity_person <- function() { + forced <- c( + "editors", + "editors-series" + ) + forced +} diff --git a/R/utils.R b/R/utils.R index b8b3b7a7..06e6837f 100644 --- a/R/utils.R +++ b/R/utils.R @@ -24,7 +24,7 @@ clean_str <- function(str) { if (clean == "") { return(NULL) } - # Parse encoding + # Encoding enc <- Encoding(clean) if (enc != "UTF-8") clean <- iconv(clean, to = "UTF-8") diff --git a/tests/testthat/_snaps/as_cff_reference.md b/tests/testthat/_snaps/as_cff_reference.md index ae2bdf50..89902239 100644 --- a/tests/testthat/_snaps/as_cff_reference.md +++ b/tests/testthat/_snaps/as_cff_reference.md @@ -299,6 +299,13 @@ month: '1' year: '2050' +# Duplicates + + Code + uniq <- as_cff(bibend) + Message + i Removing duplicate objects. 
+ # Article Code diff --git a/tests/testthat/test-as_cff_reference.R b/tests/testthat/test-as_cff_reference.R index a880c845..883d46e3 100644 --- a/tests/testthat/test-as_cff_reference.R +++ b/tests/testthat/test-as_cff_reference.R @@ -209,6 +209,28 @@ test_that("Fallback date", { expect_snapshot(init_cff) }) +test_that("Duplicates", { + bib <- bibentry("Misc", + title = "Test", + author = "Billy Jean", + date = "2050-01-12", + urldate = "2099-02-02", + publisher = "Random House", + type = "RANDOM" + ) + bib2 <- bibentry("Manual", + title = "Test", + author = "Billy Jean", + date = "2050-01-12", + urldate = "2099-02-02", + publisher = "Random House", + type = "RANDOM" + ) + bibend <- c(rep(bib, 3), bib2) + expect_snapshot(uniq <- as_cff(bibend)) + expect_length(uniq, 2) +}) + # Parse citation from BibTeX ---- test_that("Article", { From 8a03e1e389334cd66de0cdac8180213b8d7ce68d Mon Sep 17 00:00:00 2001 From: dieghernan Date: Wed, 6 Mar 2024 12:02:42 +0000 Subject: [PATCH 09/13] Review create and hooks --- R/as_cff.R | 15 +++- R/as_cff_person.R | 2 + R/assertions.R | 4 +- R/cff_create.R | 155 +++++++++++++++++++++++------------------- R/cff_gha_update.R | 6 +- R/cff_git_hook.R | 4 +- R/cff_write.R | 2 +- R/utils.R | 17 ++++- man/as_cff.Rd | 17 ++++- man/as_cff_person.Rd | 2 + man/cff_create.Rd | 6 +- man/cff_gha_update.Rd | 5 +- man/cff_git_hook.Rd | 4 +- 13 files changed, 148 insertions(+), 91 deletions(-) diff --git a/R/as_cff.R b/R/as_cff.R index 820617bf..f3eaa952 100644 --- a/R/as_cff.R +++ b/R/as_cff.R @@ -18,8 +18,15 @@ #' #' @returns #' -#' A `cff` object. These objects are rarely [valid][cff_validate()], but can -#' be used to complement or modify complete `cff` objects. +#' * `as_cff.person()` returns an object with classes `"cff_pers_list", "cff"`. +#' * `as_cff.bibentry()` and `as_cff.Bibtex()` returns an object with classes +#' `"cff_ref_list", "cff"`. +#' * The rest of methods returns usually an object of class `cff`. However if +#' `x` have an structure compatible with `definitions.person`, +#' `definitions.entity` or `definitions.reference` the object would have the +#' corresponding subclass. +#' +#' Learn more about the \CRANpkg{cffr} class system in [cff_class]. #' #' @family coercing #' @family s3method @@ -28,7 +35,7 @@ #' For `as_cff.bibentry()` / `as_cff.Bibtex()` see #' `vignette("bibtex_cff", "cffr")` to understand how the mapping is performed. #' -#' [as_cff_person()] is preferred over `as_cff.person()` since it can handle +#' [as_cff_person()] is preferred over `as_cff.person()`, since it can handle #' `character` person such as `"Davis, Jr., Sammy"`. For `person` objects both #' functions are similar. #' @@ -39,6 +46,8 @@ #' - [cff_read()]: Create a `cff` object from a external file. #' - [as_cff_person()]: Recommended way for creating persons in CFF format. #' +#' Learn more about the \CRANpkg{cffr} class system in [cff_class]. +#' #' @export #' #' @examples diff --git a/R/as_cff_person.R b/R/as_cff_person.R index 5e2260e4..e957c275 100644 --- a/R/as_cff_person.R +++ b/R/as_cff_person.R @@ -25,6 +25,8 @@ #' @seealso #' Examples in `vignette("cffr", "cffr")` and [utils::person()]. #' +#' Learn more about the \CRANpkg{cffr} class system in [cff_class]. 
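# A minimal sketch (assuming {cffr} is attached) of the return classes
# documented above; `pers` and `refs` are throwaway objects created only for
# illustration, not part of the patch's own examples.
pers <- as_cff(person(given = "Jane", family = "Doe"))
class(pers)
# Expected per @returns above: "cff_pers_list" "cff"
refs <- as_cff(bibentry("Misc", title = "A title", author = "Jane Doe"))
class(refs)
# Expected per @returns above: "cff_ref_list" "cff"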
+#' #' @export #' @rdname as_cff_person #' @name as_cff_person diff --git a/R/assertions.R b/R/assertions.R index 245dc9e1..fefb8dd3 100644 --- a/R/assertions.R +++ b/R/assertions.R @@ -94,7 +94,9 @@ stopifnotcff <- function(x) { ) } - if (tools::file_ext(x) != "cff") { + guess <- detect_x_source(x) + + if (guess != "cff_citation") { cli::cli_abort( "{.var x} is not a {.file *.cff} file" ) diff --git a/R/cff_create.R b/R/cff_create.R index b8792efb..14f04376 100644 --- a/R/cff_create.R +++ b/R/cff_create.R @@ -18,13 +18,13 @@ #' the [`cff`] object. It could be: #' * A missing value. That would retrieve the `DESCRIPTION` file on your #' in-development **R** package. -#' * An existing [`cff`] object, -#' * The name of an installed package (`"jsonlite"`), or +#' * An existing [`cff`] object. +#' * The name of an installed package (`"jsonlite"`). #' * Path to a `DESCRIPTION` file (`"./DESCRIPTION"`). #' #' @param keys #' List of additional keys to add to the [`cff`] object. See -#' **Details**. +#' [cff_modify(). #' @param cff_version The Citation File Format schema version that the #' `CITATION.cff` file adheres to for providing the citation metadata. #' @param gh_keywords Logical `TRUE/FALSE`. If the package is hosted on @@ -114,28 +114,50 @@ cff_create <- function(x, keys = list(), cff_version = "1.2.0", gh_keywords = TRUE, dependencies = TRUE, authors_roles = c("aut", "cre")) { - # On missing use package root - if (missing(x)) x <- getwd() + # Guess source + # On missing add getwd() + if (missing(x)) { + hint_source <- "indev" + x <- getwd() + } else if (identical(getwd(), x)) { + # This case is coming from cff_write + hint_source <- "indev" + } else { + hint_source <- detect_x_source(x) + } + + # Abort in non-valid sources + valid_sources <- c("indev", "cff_obj", "package", "description") + if (!hint_source %in% valid_sources) { + # Abort, prepare message + msg_hint <- switch(hint_source, + "dontknow" = paste0( + "If it is a package ", + "you may need to install it with ", + "{.fn install.packages}." + ), + "bib" = "Maybe try with {.fn cff_read}." + ) - if (!is_cff(x) && !is.character(x)) { - msg <- "{.arg x} should be a {.cls cff} or {.cls character} object." - cli::cli_abort(msg) + cli::cli_abort( + paste0("{.arg x} not valid. 
", msg_hint) + ) } - # Detect sources and build cff - result_parsed <- detect_sources( + # Build cff and return paths if any + result_parsed <- build_cff_and_paths( x, cff_version, gh_keywords, - dependencies, authors_roles + dependencies, authors_roles, hint_source ) desc_path <- result_parsed[["desc_path"]] - instpack <- result_parsed[["instpack"]] cffobjend <- result_parsed[["cffobjend"]] # Add software dependencies if (dependencies) { + instpack <- as.character(installed.packages()[, "Package"]) deps <- parse_dependencies(desc_path, instpack) cffobjend$references <- unique(c(cffobjend$references, deps)) @@ -155,73 +177,68 @@ cff_create <- function(x, keys = list(), cff_version = "1.2.0", cffobjend } -detect_sources <- function(x, cff_version = "1.2.0", - gh_keywords = TRUE, dependencies = TRUE, - authors_roles = c("aut", "cre")) { - instpack <- as.character(installed.packages()[, "Package"]) +build_cff_and_paths <- function(x, cff_version = "1.2.0", + gh_keywords = TRUE, dependencies = TRUE, + authors_roles = c("aut", "cre"), hint_source) { + collect_list <- list( + desc_path = NULL, + cffobjend = NULL + ) - # Set initially citobj to NULL - citobj <- NULL - desc_path <- NULL + # "indev", "cff_obj", "package", "description" - # Paths + # Already cff, return it if (is_cff(x)) { # It is already an object - cffobj <- x + cffobj <- as_cff(as.list(x)) cffobj["cff-version"] <- cff_version - } else { - # Detect a package - if (x %in% instpack) x <- file.path(find.package(x), "DESCRIPTION") - # If is on the root create DESCRIPTION path - if (x == getwd()) x <- file.path(x, "DESCRIPTION") - - if (isTRUE(grep("DESCRIPTION", x) == 1)) { - # Call for a DESCRIPTION file - desc_path <- x - # Look if a CITATION file on inst/ folder - # for in-development packages - cit_path <- gsub("DESCRIPTION$", "inst/CITATION", x) - # If it doesn't exists look on the root - # this is for call to installed packages with system.file() - if (!file.exists(cit_path)) { - cit_path <- gsub("DESCRIPTION$", "CITATION", x) - } - if (file.exists(cit_path)) { - citobj <- cff_safe_read_citation(desc_path, cit_path) - if (length(citobj) == 0) citobj <- NULL - citobj <- drop_null(citobj) - citobj <- unname(citobj) - } - } else { - msg <- paste0( - "{.arg x} ({x}) not valid. 
If it is a package ", - "you may need to install it with ", - "{.fn install.packages}" - ) - cli::cli_abort(msg) - } - - if (!file.exists(desc_path)) { - cli::cli_abort("No {.file DESCRIPTION} file found with {.arg x}") - } - - cffobj <- cff_read_description(desc_path, cff_version, - gh_keywords = gh_keywords, - authors_roles = authors_roles - ) + + collect_list$cffobjend <- cffobj + return(collect_list) } - citobj <- unique(citobj) + # Get info from DESCRIPTION + desc_path <- switch(hint_source, + "indev" = file.path(getwd(), "DESCRIPTION"), + "description" = x, + "package" = system.file("DESCRIPTION", package = x) + ) + + if (is.null(file_path_or_null(desc_path))) { + cli::cli_abort("No {.file DESCRIPTION} file found with {.arg x}.") + } + + cffobj <- cff_read_description(desc_path, cff_version, + gh_keywords = gh_keywords, + authors_roles = authors_roles + ) - # Merge DESCRIPTION and CITATION - cffobjend <- merge_desc_cit(cffobj, citobj) + # Just for description case + try_get_citation <- function(x) { + cit1 <- file.path(dirname(x), "inst/CITATION") + cit2 <- file.path(dirname(x), "CITATION") - # Return collected info + c(file_path_or_null(cit1), file_path_or_null(cit2))[1] + } - list( - desc_path = desc_path, - instpack = instpack, - cffobjend = cffobjend + cit_path <- switch(hint_source, + "indev" = file.path(getwd(), "inst/CITATION"), + "description" = try_get_citation(x), + "package" = system.file("CITATION", package = x) ) + + cit_path <- file_path_or_null(cit_path[1]) + + if (!is.null(cit_path)) { + citobj <- cff_safe_read_citation(desc_path, cit_path) + citobj <- unique(citobj) + # Merge DESCRIPTION and CITATION + cffobj <- merge_desc_cit(cffobj, citobj) + } + + collect_list$desc_path <- desc_path + collect_list$cffobjend <- cffobj + + return(collect_list) } diff --git a/R/cff_gha_update.R b/R/cff_gha_update.R index 5d53b0bc..867f064f 100644 --- a/R/cff_gha_update.R +++ b/R/cff_gha_update.R @@ -2,9 +2,9 @@ #' #' @description #' -#' This function would install a -#' [GitHub Action](https://github.com/features/actions) on your repo. The action -#' will update your `CITATION.cff` when any of these events occur: +#' This function would install a [GitHub +#' Action](https://github.com/features/actions) on your repo. The action will +#' update your `CITATION.cff` when any of these events occur: #' - You publish a new release of the package. #' - Your `DESCRIPTION` or `inst/CITATION` are modified. #' - The action can be run also manually. diff --git a/R/cff_git_hook.R b/R/cff_git_hook.R index 55e1bbd9..a2304d9e 100644 --- a/R/cff_git_hook.R +++ b/R/cff_git_hook.R @@ -53,8 +53,8 @@ #' If you are using **RStudio** you can run also this command from a **R** #' script by selecting that line and sending it to the terminal using: #' -#' - `Ctrl+Alt+Enter` (Windows & Linux), or -#' - `Cmd+Option+Return` (Mac). +#' - Windows & Linux: `Ctrl+Alt+Enter`. +#' - Mac: `Cmd+Option+Return`. 
#' #' # Removing the git pre-commit hook #' diff --git a/R/cff_write.R b/R/cff_write.R index d1dd21e2..ad46a02a 100644 --- a/R/cff_write.R +++ b/R/cff_write.R @@ -70,7 +70,7 @@ cff_write <- function(x, outfile = "CITATION.cff", keys = list(), cff_version = "1.2.0", gh_keywords = TRUE, dependencies = TRUE, validate = TRUE, verbose = TRUE, authors_roles = c("aut", "cre")) { - # On missing use package root + # # On missing use NULL if (missing(x)) x <- getwd() citat <- cff_create(x, diff --git a/R/utils.R b/R/utils.R index 06e6837f..a6071318 100644 --- a/R/utils.R +++ b/R/utils.R @@ -216,10 +216,14 @@ guess_cff_part <- function(x) { detect_x_source <- function(x) { - if (missing(x)) { + if (any(missing(x), is.null(x))) { return("indev") } + if (is_cff(x)) { + return("cff_obj") + } + x <- as.character(x)[1] instpack <- as.character(installed.packages()[, "Package"]) @@ -257,3 +261,14 @@ match_cff_arg <- function(arg, valid, for_msg, call = environment()) { return(arg) } + +file_path_or_null <- function(x) { + x_c <- clean_str(x) + if (is.null(x_c)) { + return(x) + } + if (file.exists(x)) { + return(x) + } + return(NULL) +} diff --git a/man/as_cff.Rd b/man/as_cff.Rd index 79595e74..85bd0431 100644 --- a/man/as_cff.Rd +++ b/man/as_cff.Rd @@ -29,8 +29,17 @@ list.} \item{...}{Additional arguments to be passed on to other methods.} } \value{ -A \code{cff} object. These objects are rarely \link[=cff_validate]{valid}, but can -be used to complement or modify complete \code{cff} objects. +\itemize{ +\item \code{as_cff.person()} returns an object with classes \verb{"cff_pers_list", "cff"}. +\item \code{as_cff.bibentry()} and \code{as_cff.Bibtex()} returns an object with classes +\verb{"cff_ref_list", "cff"}. +\item The rest of methods returns usually an object of class \code{cff}. However if +\code{x} have an structure compatible with \code{definitions.person}, +\code{definitions.entity} or \code{definitions.reference} the object would have the +corresponding subclass. +} + +Learn more about the \CRANpkg{cffr} class system in \link{cff_class}. } \description{ \code{as_cff()} turns an existing list-like \strong{R} object into a so-called @@ -48,7 +57,7 @@ be used to complement or modify complete \code{cff} objects. For \code{as_cff.bibentry()} / \code{as_cff.Bibtex()} see \code{vignette("bibtex_cff", "cffr")} to understand how the mapping is performed. -\code{\link[=as_cff_person]{as_cff_person()}} is preferred over \code{as_cff.person()} since it can handle +\code{\link[=as_cff_person]{as_cff_person()}} is preferred over \code{as_cff.person()}, since it can handle \code{character} person such as \code{"Davis, Jr., Sammy"}. For \code{person} objects both functions are similar. } @@ -89,6 +98,8 @@ as_cff(a_cit) \item \code{\link[=as_cff_person]{as_cff_person()}}: Recommended way for creating persons in CFF format. } +Learn more about the \CRANpkg{cffr} class system in \link{cff_class}. + Other functions for converting between \strong{R} classes: \code{\link{as_bibentry}()}, \code{\link{as_cff_person}()} diff --git a/man/as_cff_person.Rd b/man/as_cff_person.Rd index 5b2921a8..e2338db4 100644 --- a/man/as_cff_person.Rd +++ b/man/as_cff_person.Rd @@ -169,6 +169,8 @@ See \strong{Examples} for more information. \seealso{ Examples in \code{vignette("cffr", "cffr")} and \code{\link[utils:person]{utils::person()}}. +Learn more about the \CRANpkg{cffr} class system in \link{cff_class}. 
+ Other functions for converting between \strong{R} classes: \code{\link{as_bibentry}()}, \code{\link{as_cff}()} diff --git a/man/cff_create.Rd b/man/cff_create.Rd index ab413b1e..c6218aba 100644 --- a/man/cff_create.Rd +++ b/man/cff_create.Rd @@ -19,13 +19,13 @@ the \code{\link{cff}} object. It could be: \itemize{ \item A missing value. That would retrieve the \code{DESCRIPTION} file on your in-development \strong{R} package. -\item An existing \code{\link{cff}} object, -\item The name of an installed package (\code{"jsonlite"}), or +\item An existing \code{\link{cff}} object. +\item The name of an installed package (\code{"jsonlite"}). \item Path to a \code{DESCRIPTION} file (\code{"./DESCRIPTION"}). }} \item{keys}{List of additional keys to add to the \code{\link{cff}} object. See -\strong{Details}.} +[cff_modify().} \item{cff_version}{The Citation File Format schema version that the \code{CITATION.cff} file adheres to for providing the citation metadata.} diff --git a/man/cff_gha_update.Rd b/man/cff_gha_update.Rd index c45049d9..195b11e7 100644 --- a/man/cff_gha_update.Rd +++ b/man/cff_gha_update.Rd @@ -16,9 +16,8 @@ action?} Invisible, this function is called by its side effects. } \description{ -This function would install a -\href{https://github.com/features/actions}{GitHub Action} on your repo. The action -will update your \code{CITATION.cff} when any of these events occur: +This function would install a \href{https://github.com/features/actions}{GitHub Action} on your repo. The action will +update your \code{CITATION.cff} when any of these events occur: \itemize{ \item You publish a new release of the package. \item Your \code{DESCRIPTION} or \code{inst/CITATION} are modified. diff --git a/man/cff_git_hook.Rd b/man/cff_git_hook.Rd index 85a7f720..95488a9e 100644 --- a/man/cff_git_hook.Rd +++ b/man/cff_git_hook.Rd @@ -48,8 +48,8 @@ on the terminal. If you are using \strong{RStudio} you can run also this command from a \strong{R} script by selecting that line and sending it to the terminal using: \itemize{ -\item \code{Ctrl+Alt+Enter} (Windows & Linux), or -\item \code{Cmd+Option+Return} (Mac). +\item Windows & Linux: \code{Ctrl+Alt+Enter}. +\item Mac: \code{Cmd+Option+Return}. 
} } From 40a268aafe1e6bd2464a87fb9b642485e830021e Mon Sep 17 00:00:00 2001 From: Diego H Date: Wed, 6 Mar 2024 22:31:50 +0100 Subject: [PATCH 10/13] Review up to cff_read --- R/as_cff_person.R | 3 +- R/cff_modify.R | 15 ++-- R/cff_read.R | 104 +++++----------------------- R/cff_read_bib_text.R | 16 ++++- R/methods.R | 2 - R/utils-methods.R | 23 ------ R/utils.R | 57 +++++++++++++++ man/as_cff_person.Rd | 3 +- man/cff_modify.Rd | 11 +-- man/cff_read.Rd | 17 +++-- man/cff_read_bib_text.Rd | 8 ++- tests/testthat/_snaps/cff_modify.md | 38 ++++++++++ tests/testthat/test-cff_modify.R | 39 +++++++++++ 13 files changed, 198 insertions(+), 138 deletions(-) create mode 100644 tests/testthat/_snaps/cff_modify.md create mode 100644 tests/testthat/test-cff_modify.R diff --git a/R/as_cff_person.R b/R/as_cff_person.R index e957c275..88142d7c 100644 --- a/R/as_cff_person.R +++ b/R/as_cff_person.R @@ -41,7 +41,8 @@ #' #' @return #' `as_cff_person()` returns an object of classes `"cff_pers_list", "cff"` -#' according to the +#' according to the `definitions.person` or `definitions.entity` specified in +#' the #' ```{r, echo=FALSE, results='asis'} #' #' cat(paste0(" [Citation File Format schema]", diff --git a/R/cff_modify.R b/R/cff_modify.R index 757940df..e538c432 100644 --- a/R/cff_modify.R +++ b/R/cff_modify.R @@ -3,12 +3,12 @@ #' Add new keys or modify existing ones on a [`cff`] object. #' #' @param x A [`cff`] object. -#' @param ... Named arguments to be used for modifying `x`. See also [cff()]. +#' @param ... Named arguments to be used for modifying `x`. See also `...` +#' argument in [cff()]. #' #' @details #' -#' If any key provided in `...` is present in `x`, the result would have the -#' key provided in `...`. +#' Keys provided in `...` would override the corresponding key in `x`. #' #' @returns #' @@ -21,10 +21,6 @@ #' #' See [cff()] for creating [`cff`] objects from scratch. #' -#' -#' -#' @export -#' @family core #' @examples #' x <- cff() #' x @@ -76,7 +72,10 @@ modify_cff <- function(x, keys, argname = "...") { # Name order sorted_nm <- unique(c(init_ord, names(xend))) - as_cff(xend[sorted_nm]) + + # Relist and add classes + xend <- as.list(xend[sorted_nm]) + as_cff(xend) } diff --git a/R/cff_read.R b/R/cff_read.R index e61e34b2..8a175c94 100644 --- a/R/cff_read.R +++ b/R/cff_read.R @@ -41,17 +41,25 @@ #' [yaml::read_yaml()], [bibtex::read.bib()], etc.). #' #' @return -#' A [`cff`] object. In the case of [cff_read_cff_citation()] and -#' [cff_read_description()] a full and (potentially) valid `cff` object. #' +#' * `cff_read_cff_citation()` and `cff_read_description()` returns a object +#' with class `cff`. +#' * `cff_read_citation()` and `cff_read_bib()` returns an object of classes +#' `"cff_ref_list", "cff"` according to the `definitions.references` +#' specified in the +#' ```{r, echo=FALSE, results='asis'} #' -#' In the case of [cff_read_bib()] and [cff_read_citation()], the result is -#' the `cff` version of a [bibentry()] object (i.e. a bibliographic reference), -#' that can be used to complement another `cff` object. See -#' `vignette("bibtex_cff", "cffr")` to get further insights on how this -#' conversion is performed. +#' cat(paste0(" [Citation File Format schema]", +#' "(https://github.com/citation-file-format/", +#' "citation-file-format/blob/main/schema-guide.md).")) #' #' +#' ``` +#' See `vignette("bibtex_cff", "cffr")` to get further insights on how this +#' conversion is performed. +#' +#' Learn more about the \CRANpkg{cffr} class system in [cff_class]. 
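# A minimal sketch of the dispatch described above; the file names are
# placeholders and assume such files exist in the working directory.
cff_read("CITATION.cff")   # handled by cff_read_cff_citation()
cff_read("DESCRIPTION")    # handled by cff_read_description()
cff_read("refs.bib")       # handled by cff_read_bib()
cff_read("inst/CITATION")  # handled by cff_read_citation()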
+#' #' @references #' #' - R Core Team (2023). _Writing R Extensions_. @@ -118,9 +126,9 @@ cff_read <- function(path, ...) { ) ) } - filetype <- guess_type_file(path) + filetype <- detect_x_source(path) - if (is.null(filetype)) { + if (filetype == "dontknow") { cli::cli_abort( paste0( "Don't recognize the file type of {.file {path}}.", @@ -134,7 +142,7 @@ cff_read <- function(path, ...) { "description" = cff_read_description(path, ...), "bib" = cff_read_bib(path, ...), "citation" = cff_read_citation(path, ...), - NULL + cli::cli_abort("Don't know how to read {.val {x}}.") ) endobj @@ -315,78 +323,4 @@ cff_safe_read_citation <- function(desc_path, cit_path) { tocff } -# Helpers ---- - -guess_type_file <- function(path) { - if (grepl("\\.cff$", path, ignore.case = TRUE)) { - return("cff_citation") - } - if (grepl("\\.bib$", path, ignore.case = TRUE)) { - return("bib") - } - if (grepl("citat", path, ignore.case = TRUE)) { - return("citation") - } - if (grepl("desc", path, ignore.case = TRUE)) { - return("description") - } - - return(NULL) -} - -#' Parse and clean data from DESCRIPTION to create metadata -#' @noRd -clean_package_meta <- function(meta) { - if (!inherits(meta, "packageDescription")) { - # Add encoding - meta <- list() - meta$Encoding <- "UTF-8" - return(meta) - } - - # Convert to a desc object - - # First write to a dcf file - tmp <- tempfile("DESCRIPTION") - meta_unl <- unclass(meta) - write.dcf(meta_unl, tmp) - pkg <- desc::desc(tmp) - pkg$coerce_authors_at_r() - # Extract package data - meta <- pkg$get(desc::cran_valid_fields) - - # Clean missing and drop empty fields - meta <- drop_null(lapply(meta, clean_str)) - - # Check encoding - if (!is.null(meta$Encoding)) { - meta <- lapply(meta, iconv, from = meta$Encoding, to = "UTF-8") - } else { - meta$Encoding <- "UTF-8" - } - unlink(tmp, force = TRUE) - meta -} - - - -# Convert a DESCRIPTION object to meta object using desc package -desc_to_meta <- function(x) { - src <- x - my_meta <- desc::desc(src) - my_meta$coerce_authors_at_r() - - - # As list - my_meta_l <- my_meta$get(desc::cran_valid_fields) - my_meta_l <- as.list(my_meta_l) - v_nas <- vapply(my_meta_l, is.na, logical(1)) - my_meta_l <- my_meta_l[!v_nas] - - meta_proto <- packageDescription("cffr") - - class(my_meta_l) <- class(meta_proto) - attr(my_meta_l, "file") <- x - - my_meta_l -} +# See utils.R diff --git a/R/cff_read_bib_text.R b/R/cff_read_bib_text.R index e615632a..0521080f 100644 --- a/R/cff_read_bib_text.R +++ b/R/cff_read_bib_text.R @@ -7,6 +7,7 @@ #' @family bibtex #' @family reading #' @seealso +#' #' [cff_read_bib()] for reading `*.bib` files. #' #' @export @@ -18,10 +19,23 @@ #' #' @return #' -#' ```{r child = "man/chunks/value.Rmd"} +#' An object of classes `"cff_ref_list", "cff"` according to the +#' `definitions.references` specified in +#' the +#' ```{r, echo=FALSE, results='asis'} +#' +#' cat(paste0(" [Citation File Format schema]", +#' "(https://github.com/citation-file-format/", +#' "citation-file-format/blob/main/schema-guide.md). ")) +#' +#' #' ``` +#' Each element of the `"cff_ref_list", "cff"` object would have classes +#' `"cff_ref", "cff"`. Learn more about the \CRANpkg{cffr} class system in +#' [cff_class]. #' #' @details +#' #' This is a helper function that writes `x` to a `*.bib` file and reads it with #' [cff_read_bib()]. #' diff --git a/R/methods.R b/R/methods.R index 618eba07..126a55b9 100644 --- a/R/methods.R +++ b/R/methods.R @@ -44,8 +44,6 @@ as.data.frame.cff <- function(x, row.names = NULL, optional = FALSE, ...) 
{ }) the_df <- do.call(cbind, end_df) - } else if (is.null(names(x))) { - the_df <- cff_list_to_df(x) } else { the_df <- cff_to_df(x) } diff --git a/R/utils-methods.R b/R/utils-methods.R index 8a0a0d9e..9cf9a4a8 100644 --- a/R/utils-methods.R +++ b/R/utils-methods.R @@ -175,26 +175,3 @@ cff_to_df <- function(x) { return(final_df) } - -cff_list_to_df <- function(x) { - # Applicable to lists of persons or references - # Guess type - if (!"type" %in% names(x[[1]])) { - guess <- "person" - } else { - guess <- "reference" - } - - - x_len <- seq_len(length(x)) - df_l <- lapply(x_len, function(y) { - df <- as.data.frame(x[y]) - newnames <- paste0(guess, ".", sprintf("%02d", y - 1), ".", names(df)) - names(df) <- newnames - - df - }) - - df_end <- df_list_to_df(df_l) - df_end -} diff --git a/R/utils.R b/R/utils.R index a6071318..f69a0201 100644 --- a/R/utils.R +++ b/R/utils.R @@ -272,3 +272,60 @@ file_path_or_null <- function(x) { } return(NULL) } + +#' Parse and clean data from DESCRIPTION to create metadata +#' @noRd +clean_package_meta <- function(meta) { + if (!inherits(meta, "packageDescription")) { + # Add encoding + meta <- list() + meta$Encoding <- "UTF-8" + return(meta) + } + + # Convert to a desc object + + # First write to a dcf file + tmp <- tempfile("DESCRIPTION") + meta_unl <- unclass(meta) + write.dcf(meta_unl, tmp) + pkg <- desc::desc(tmp) + pkg$coerce_authors_at_r() + # Extract package data + meta <- pkg$get(desc::cran_valid_fields) + + # Clean missing and drop empty fields + meta <- drop_null(lapply(meta, clean_str)) + + # Check encoding + if (!is.null(meta$Encoding)) { + meta <- lapply(meta, iconv, from = meta$Encoding, to = "UTF-8") + } else { + meta$Encoding <- "UTF-8" + } + unlink(tmp, force = TRUE) + meta +} + + + +# Convert a DESCRIPTION object to meta object using desc package +desc_to_meta <- function(x) { + src <- x + my_meta <- desc::desc(src) + my_meta$coerce_authors_at_r() + + + # As list + my_meta_l <- my_meta$get(desc::cran_valid_fields) + my_meta_l <- as.list(my_meta_l) + v_nas <- vapply(my_meta_l, is.na, logical(1)) + my_meta_l <- my_meta_l[!v_nas] + + meta_proto <- packageDescription("cffr") + + class(my_meta_l) <- class(meta_proto) + attr(my_meta_l, "file") <- x + + my_meta_l +} diff --git a/man/as_cff_person.Rd b/man/as_cff_person.Rd index e2338db4..99df2b3f 100644 --- a/man/as_cff_person.Rd +++ b/man/as_cff_person.Rd @@ -24,7 +24,8 @@ See \strong{Examples}. } \value{ \code{as_cff_person()} returns an object of classes \verb{"cff_pers_list", "cff"} -according to the \href{https://github.com/citation-file-format/citation-file-format/blob/main/schema-guide.md}{Citation File Format schema}. +according to the \code{definitions.person} or \code{definitions.entity} specified in +the \href{https://github.com/citation-file-format/citation-file-format/blob/main/schema-guide.md}{Citation File Format schema}. Each element of the \verb{"cff_pers_list", "cff"} object would have classes \verb{"cff_pers", "cff"}. Learn more about the \CRANpkg{cffr} class system in \link{cff_class}. diff --git a/man/cff_modify.Rd b/man/cff_modify.Rd index 69173071..991eda9c 100644 --- a/man/cff_modify.Rd +++ b/man/cff_modify.Rd @@ -9,7 +9,8 @@ cff_modify(x, ...) \arguments{ \item{x}{A \code{\link{cff}} object.} -\item{...}{Named arguments to be used for modifying \code{x}. See also \code{\link[=cff]{cff()}}.} +\item{...}{Named arguments to be used for modifying \code{x}. See also \code{...} +argument in \code{\link[=cff]{cff()}}.} } \value{ A \code{\link{cff}} object. 
@@ -18,8 +19,7 @@ A \code{\link{cff}} object. Add new keys or modify existing ones on a \code{\link{cff}} object. } \details{ -If any key provided in \code{...} is present in \code{x}, the result would have the -key provided in \code{...}. +Keys provided in \code{...} would override the corresponding key in \code{x}. } \examples{ x <- cff() @@ -43,11 +43,6 @@ This function is wrapper of \code{\link[utils:modifyList]{utils::modifyList()}}. See \code{\link[=cff]{cff()}} for creating \code{\link{cff}} objects from scratch. -Other core functions of \CRANpkg{cffr}: -\code{\link{cff}()}, -\code{\link{cff_create}()}, -\code{\link{cff_validate}()} - Other core functions of \CRANpkg{cffr}: \code{\link{cff}()}, \code{\link{cff_create}()}, diff --git a/man/cff_read.Rd b/man/cff_read.Rd index 5a7fef5e..0ee5cffe 100644 --- a/man/cff_read.Rd +++ b/man/cff_read.Rd @@ -45,15 +45,18 @@ generating the \code{\link{cff}} object.} \item{encoding}{Encoding to be assumed for \code{path}. See \code{\link[=readLines]{readLines()}}.} } \value{ -A \code{\link{cff}} object. In the case of \code{\link[=cff_read_cff_citation]{cff_read_cff_citation()}} and -\code{\link[=cff_read_description]{cff_read_description()}} a full and (potentially) valid \code{cff} object. - -In the case of \code{\link[=cff_read_bib]{cff_read_bib()}} and \code{\link[=cff_read_citation]{cff_read_citation()}}, the result is -the \code{cff} version of a \code{\link[=bibentry]{bibentry()}} object (i.e. a bibliographic reference), -that can be used to complement another \code{cff} object. See -\code{vignette("bibtex_cff", "cffr")} to get further insights on how this +\itemize{ +\item \code{cff_read_cff_citation()} and \code{cff_read_description()} returns a object +with class \code{cff}. +\item \code{cff_read_citation()} and \code{cff_read_bib()} returns an object of classes +\verb{"cff_ref_list", "cff"} according to the \code{definitions.references} +specified in the \href{https://github.com/citation-file-format/citation-file-format/blob/main/schema-guide.md}{Citation File Format schema}. +See \code{vignette("bibtex_cff", "cffr")} to get further insights on how this conversion is performed. } + +Learn more about the \CRANpkg{cffr} class system in \link{cff_class}. +} \description{ Read files and convert them to \code{\link{cff}} objects. Files supported are: diff --git a/man/cff_read_bib_text.Rd b/man/cff_read_bib_text.Rd index bebb9aa7..5ea7cdfd 100644 --- a/man/cff_read_bib_text.Rd +++ b/man/cff_read_bib_text.Rd @@ -18,8 +18,12 @@ cff_read_bib_text(x, encoding = "UTF-8", ...) }} } \value{ -A \code{\link[=cff_class]{cff}} object ready to be used with other functions (i.e. -\code{\link[=cff_create]{cff_create()}}. +An object of classes \verb{"cff_ref_list", "cff"} according to the +\code{definitions.references} specified in +the \href{https://github.com/citation-file-format/citation-file-format/blob/main/schema-guide.md}{Citation File Format schema}. +Each element of the \verb{"cff_ref_list", "cff"} object would have classes +\verb{"cff_ref", "cff"}. Learn more about the \CRANpkg{cffr} class system in +\link{cff_class}. } \description{ Convert a \code{\link[=character]{character}} representing a BibTeX entry to a diff --git a/tests/testthat/_snaps/cff_modify.md b/tests/testthat/_snaps/cff_modify.md new file mode 100644 index 00000000..f01c351d --- /dev/null +++ b/tests/testthat/_snaps/cff_modify.md @@ -0,0 +1,38 @@ +# Errors and messages + + Code + cff_modify(a_list, abstract = "An abstract") + Condition + Error in `cff_modify()`: + ! 
`x` should be a object, not . + +--- + + Code + xend <- cff_modify(a_cff) + Message + i Args `...` empty. Returning `x`. + +--- + + Code + dup <- cff_modify(a_cff, abstract = "a", abstract = "b") + Message + ! Removing duplicated keys. + +--- + + Code + cff_modify(a_cff, "a", "b") + Condition + Error in `validate_extra_keys()`: + ! Elements in `...` should be named. + +--- + + Code + mod <- cff_modify(a_cff, "a", abstract = "b") + Message + ! Found 1 not-named argument in position 1. + i Removing unnamed arguments + diff --git a/tests/testthat/test-cff_modify.R b/tests/testthat/test-cff_modify.R new file mode 100644 index 00000000..619ef81f --- /dev/null +++ b/tests/testthat/test-cff_modify.R @@ -0,0 +1,39 @@ +test_that("Errors and messages", { + a_cff <- cff() + a_list <- as.list(a_cff) + expect_true(inherits(a_list, "list")) + expect_false(is_cff(a_list)) + expect_snapshot(cff_modify(a_list, abstract = "An abstract"), + error = TRUE + ) + + # Nothing provided + expect_snapshot(xend <- cff_modify(a_cff)) + expect_identical(xend, a_cff) + + # But silent in cff create + expect_silent(aa <- cff_create("testthat", keys = NULL)) + + # Duplicate args + expect_snapshot(dup <- cff_modify(a_cff, abstract = "a", abstract = "b")) + expect_length(dup, length(a_cff) + 1) + expect_identical(dup$abstract, "a") + expect_snapshot(cff_modify(a_cff, "a", "b"), error = TRUE) + expect_snapshot(mod <- cff_modify(a_cff, "a", abstract = "b")) + expect_length(mod, length(a_cff) + 1) + expect_identical(mod$abstract, "b") +}) + +test_that("Can reclass", { + a_cff <- cff() + + mod <- cff_modify(a_cff, contact = list(list( + name = "a contact", + address = "here" + ))) + + + expect_true(cff_validate(mod, verbose = FALSE)) + expect_s3_class(mod$contact, c("cff_pers_list", "cff"), exact = TRUE) + expect_s3_class(mod$contact[1], c("cff_pers", "cff"), exact = TRUE) +}) From 209ddc6d6be6a4369c29139d7d811abb204e71d0 Mon Sep 17 00:00:00 2001 From: Diego H Date: Thu, 7 Mar 2024 14:30:20 +0100 Subject: [PATCH 11/13] Left methods and utils --- .lintr | 2 +- NEWS.md | 2 +- R/as_bibentry.R | 8 + R/as_cff_reference.R | 282 --------- R/assertions.R | 46 +- R/cff.R | 10 +- R/cff_create.R | 6 +- R/cff_gha_update.R | 4 +- R/cff_git_hook.R | 2 +- R/cff_read.R | 47 +- R/cff_validate.R | 85 ++- R/cff_write.R | 2 +- R/cff_write_misc.R | 43 +- R/deprecated.R | 2 +- R/methods.R | 13 +- R/utils-alerts.R | 88 +++ R/utils-create.R | 6 +- R/utils-persons.R | 10 +- R/utils-read-description.R | 2 +- R/utils.R | 18 +- README.Rmd | 2 +- README.md | 69 ++- codemeta.json | 4 +- data-raw/test-2as_bibentry.R | 510 ++++++++++++++++ data-raw/test2-as_cff_reference.R | 235 ++++++++ data/cran_to_spdx.rda | Bin 907 -> 916 bytes inst/WORDLIST | 1 - inst/schemaorg.json | 2 +- man/cff_validate.Rd | 13 +- man/cff_write_misc.Rd | 4 +- tests/testthat/_snaps/as_bibentry.md | 700 ++-------------------- tests/testthat/_snaps/as_cff_person.md | 8 +- tests/testthat/_snaps/as_cff_reference.md | 428 ++++--------- tests/testthat/_snaps/cff_create.md | 49 +- tests/testthat/_snaps/cff_gha_update.md | 23 + tests/testthat/_snaps/cff_read.md | 20 +- tests/testthat/_snaps/methods.md | 14 + tests/testthat/_snaps/utils-alerts.md | 56 ++ tests/testthat/test-as_bibentry.R | 611 +++---------------- tests/testthat/test-as_cff_person.R | 8 +- tests/testthat/test-as_cff_reference.R | 462 +++++--------- tests/testthat/test-assertions.R | 74 ++- tests/testthat/test-cff.R | 3 +- tests/testthat/test-cff_create.R | 177 +++--- tests/testthat/test-cff_gha_update.R | 41 ++ 
tests/testthat/test-cff_read.R | 59 +- tests/testthat/test-cff_validate.R | 6 - tests/testthat/test-cff_write.R | 24 +- tests/testthat/test-cff_write_misc.R | 12 +- tests/testthat/test-merge_desc_cit.R | 4 +- tests/testthat/test-methods.R | 18 + tests/testthat/test-mock-package.R | 10 +- tests/testthat/test-utils-alerts.R | 57 ++ tests/testthat/test_ci/test-full_cff.R | 2 +- tests/testthat/test_ci/test-new.R | 1 - vignettes/bibtex_cff.Rmd | 8 +- vignettes/cffr.Rmd | 13 +- vignettes/crosswalk.Rmd | 10 +- 58 files changed, 1889 insertions(+), 2527 deletions(-) create mode 100644 R/utils-alerts.R create mode 100644 data-raw/test-2as_bibentry.R create mode 100644 data-raw/test2-as_cff_reference.R create mode 100644 tests/testthat/_snaps/cff_gha_update.md create mode 100644 tests/testthat/_snaps/utils-alerts.md create mode 100644 tests/testthat/test-cff_gha_update.R create mode 100644 tests/testthat/test-utils-alerts.R diff --git a/.lintr b/.lintr index add4d27e..7d291762 100644 --- a/.lintr +++ b/.lintr @@ -2,7 +2,7 @@ linters: linters_with_defaults() # see vignette("lintr") encoding: "UTF-8" exclusions: list( "data-raw", - "tests/testthat/test_ci/test-full_cff.R", + "tests/testthat/test_ci", "vignettes/cffr.Rmd", "vignettes/bibtex_cff.Rmd" ) diff --git a/NEWS.md b/NEWS.md index f8e31ec0..95678225 100644 --- a/NEWS.md +++ b/NEWS.md @@ -31,7 +31,7 @@ package. - `as_cff.Bibtex()`. - `as_cff.bibentry()`, replacing cff_parse_citation(). - `as_cff.person()`, similar to `as_cff_person()` but only for `person` - objects. We recommend using `as_cff_person()` since it can parse also + objects. We recommend using `as_cff_person()` since it can coerce also string representing authors in BibTeX markup (`"{von Neumen}, James"`), that can't be captured properly via methods. - New `as_bibentry()` method for a variety of classes (`character`, `list`, diff --git a/R/as_bibentry.R b/R/as_bibentry.R index c5b9e3b4..95ee8cb0 100644 --- a/R/as_bibentry.R +++ b/R/as_bibentry.R @@ -192,6 +192,8 @@ as_bibentry.list <- function(x, ...) { # Unlist easy to undo the do.call effect bib <- bib[[1]] + + bib } @@ -224,6 +226,12 @@ as_bibentry.cff <- function(x, ..., ) if (is.null(obj_extract)) { + cli::cli_alert_warning( + paste0( + "In {.arg x} didn't find anything with {.arg what} = {.val {what}}. ", + "Returning empty {.cls bibentry}." + ) + ) return(bibentry()) } diff --git a/R/as_cff_reference.R b/R/as_cff_reference.R index 45f77573..000bdefd 100644 --- a/R/as_cff_reference.R +++ b/R/as_cff_reference.R @@ -117,285 +117,3 @@ make_cff_reference <- function(bib) { return(cit_list) } - -#' Extract and map BibTeX entry -#' @noRd -get_bibtex_entry <- function(bib) { - # Unclass and manage entry type - # Extract type from BibTeX - init_type <- attr(unclass(bib)[[1]], "bibtype") - init_type <- clean_str(tolower(init_type)) - - - cit_list <- drop_null(unclass(bib)[[1]]) - - # Add fields - cit_list$bibtex_entry <- init_type - - # Manage type from BibTeX and convert to CFF - # This overwrite the BibTeX type field. 
Not treated by this function - cit_list$type <- switch(init_type, - "article" = "article", - "book" = "book", - "booklet" = "pamphlet", - "conference" = "conference-paper", - "inbook" = "book", - # "incollection" = , - "inproceedings" = "conference-paper", - "manual" = "manual", - "mastersthesis" = "thesis", - "misc" = "generic", - "phdthesis" = "thesis", - "proceedings" = "proceedings", - "techreport" = "report", - "unpublished" = "unpublished", - "generic" - ) - - - # Check if it an inbook with booktitle (BibLaTeX style) - if (all(init_type == "inbook", "booktitle" %in% names(cit_list))) { - # Make it incollection - cit_list$bibtex_entry <- "incollection" - cit_list$type <- "generic" - } - - - return(cit_list) -} - -#' Adapt names from R citation()/BibTeX to cff format -#' @noRd -get_bibtex_fields <- function(cit_list) { - # to lowercase - names(cit_list) <- tolower(names(cit_list)) - nm <- names(cit_list) - # Standard BibTeX fields: - # address annote author booktitle chapter crossref edition editor - # howpublished institution journal key month note number organization pages - # publisher school series title type year - - # No mapping needed (direct mapping) - # edition journal month publisher title volume year - - # Mapped: - # author booktitle series chapter editor howpublished note number - - nm[nm == "author"] <- "authors" - # Make collection title - # booktitle takes precedence over series - nm[nm == "booktitle"] <- "collection-title" - if (!"collection-title" %in% nm) { - nm[nm == "series"] <- "collection-title" - } - nm[nm == "chapter"] <- "section" - nm[nm == "editor"] <- "editors" - nm[nm == "howpublished"] <- "medium" - nm[nm == "note"] <- "notes" - nm[nm == "number"] <- "issue" - nm[nm == "address"] <- "location" - nm[nm == "pages"] <- "bibtex_pages" # This would be removed later - - # Get some fields from BibLaTeX - nm[nm == "date"] <- "date-published" - nm[nm == "file"] <- "filename" - nm[nm == "issuetitle"] <- "issue-title" - nm[nm == "translator"] <- "translators" - nm[nm == "urldate"] <- "date-accessed" - nm[nm == "pagetotal"] <- "pages" - - # Other BibLaTeX fields that does not require any mapping - # abstract, doi, isbn, issn, url, version - - - # Keywords may be duplicated, unify - if ("keywords" %in% nm) { - kwords <- unlist(cit_list["keywords" == nm]) - kwords <- clean_str(paste(kwords, collapse = ", ")) - kwords <- trimws(unique(unlist(strsplit(kwords, ",|;")))) - cit_list$keywords <- unique(kwords) - } - - # Not mapped: - # annote crossref key organization series type - # - # Fields address, organization, series and type are treated on - # main function - # key is a special field, treated apart - # Fields ignored: annote, crossref - - names(cit_list) <- nm - - # Additionally, need to delete keywords if length is less than 2, - # errors on validation - if (length(cit_list$keywords) < 2) { - cit_list$keywords <- NULL - } - - # Treat location ---- - - loc <- cit_list$location - - if (!is.null(loc)) cit_list$location <- loc - - - - # Treat additional dates ---- - dpub <- clean_str(cit_list$`date-published`) - cit_list$`date-published` <- clean_str(as.Date(dpub, optional = TRUE)) - - datacc <- clean_str(cit_list$`date-accessed`) - cit_list$`date-accessed` <- clean_str(as.Date(datacc, optional = TRUE)) - - # Treat pages - - pages <- cit_list$bibtex_pages - if (!is.null(pages)) { - spl <- unlist(strsplit(pages, "--")) - - cit_list$start <- spl[1] - - if (length(spl) > 1) cit_list$end <- paste(spl[-1], collapse = "--") - } - - return(cit_list) -} - -#' Modify mapping of 
some org. fields on BibTeX to CFF -#' @noRd -get_bibtex_inst <- function(field_list) { - # Initial values - bibtex_entry <- field_list$bibtex_entry - to_replace <- switch(bibtex_entry, - "mastersthesis" = "school", - "phdthesis" = "school", - "conference" = "organization", - "inproceedings" = "organization", - "manual" = "organization", - "proceedings" = "organization", - "institution" - ) - - if (to_replace == "institution") { - return(field_list) - } - - # Rest of cases remove bibtex institution and rename - nms <- names(field_list) - - field_list <- field_list["institution" != nms] - - # Rename - nms2 <- names(field_list) - nms2[nms2 == to_replace] <- "institution" - names(field_list) <- nms2 - - field_list -} - -add_conference <- function(field_list) { - bibtex_entry <- field_list$bibtex_entry - - if (bibtex_entry %in% c("conference", "inproceedings", "proceedings")) { - field_list$conference <- field_list$`collection-title` - } - return(field_list) -} - - - - -#' Adapt cff keys to bibtex entries -#' @noRd -add_thesis <- function(cit_list) { - bibtex_entry <- cit_list$bibtex_entry - if (!bibtex_entry %in% c("phdthesis", "mastersthesis")) { - return(cit_list) - } - - cit_list$`thesis-type` <- switch(bibtex_entry, - phdthesis = "PhD Thesis", - "Master's Thesis" - ) - - cit_list -} - -add_address <- function(cit_list) { - loc <- cit_list$location$name - # If available - if (is.null(loc)) { - return(cit_list) - } - - # At this point is in location, see to move - - # Logic order. - # 1. To conference - # 2. To institution - # 3. To publisher - # Otherwise leave on location - - nms <- names(cit_list) - has_conf <- "conference" %in% nms - has_inst <- "institution" %in% nms - has_publish <- "publisher" %in% nms - - if (!any(has_conf, has_inst, has_publish)) { - return(cit_list) - } - - if (has_conf) { - cit_list$conference$address <- loc - cit_list$location <- NULL - } else if (has_inst) { - cit_list$institution$address <- loc - cit_list$location <- NULL - } else { - cit_list$publisher$address <- loc - cit_list$location <- NULL - } - - return(cit_list) -} - -add_bibtex_coltype <- function(field_list) { - # Add collection-type if applicable and rearrange fields - nms <- names(field_list) - - if (!"collection-title" %in% nms) { - return(field_list) - } - - # Made collection-type if we create collection-title - bibtex_type <- field_list$bibtex_entry - - # Remove `in` at init: inbook, incollection affected - coltype <- clean_str(gsub("^in", "", bibtex_type)) - field_list$`collection-type` <- coltype - - # Rearrange to make both collection keys together - nm_first <- nms[seq(1, match("collection-title", nms))] - - nms_end <- unique(c(nm_first, "collection-type", nms)) - - field_list <- field_list[nms_end] - - return(field_list) -} - -fallback_dates <- function(cit_list) { - # Fallback for year and month: use date-published - if (is.null(cit_list$month) && !is.null(cit_list$`date-published`)) { - cit_list$month <- format(as.Date(cit_list$`date-published`), "%m") - } - - if (is.null(cit_list$year) && !is.null(cit_list$`date-published`)) { - cit_list$year <- format(as.Date(cit_list$`date-published`), "%Y") - } - - ## month ---- - cit_list$month <- get_bibtex_month(cit_list) - - return(cit_list) -} diff --git a/R/assertions.R b/R/assertions.R index fefb8dd3..2c275eb1 100644 --- a/R/assertions.R +++ b/R/assertions.R @@ -55,16 +55,8 @@ is_cff <- function(x) { #' @param x object to be evaluated #' @noRd is_cff_file <- function(x) { - if (!inherits(x, "character")) { - return(FALSE) - } - - if 
(tools::file_ext(x) != "cff") { - return(FALSE) - } - - stopifnotexists(x) - return(TRUE) + src <- detect_x_source(x) + return(src == "cff_citation") } #' Check if an url is from GitHub @@ -79,40 +71,6 @@ is_github <- function(x) { return(res) } -#' Error if it is not a `cff` file or object -#' @param x file to be evaluated -#' @noRd -stopifnotcff <- function(x) { - if (is_cff(x)) { - return(invisible()) - } - - # x should be character at least - if (!inherits(x, "character")) { - cli::cli_abort( - "{.var x} is an object of class {.cls {class(x)}}, not {.cls cff}." - ) - } - - guess <- detect_x_source(x) - - if (guess != "cff_citation") { - cli::cli_abort( - "{.var x} is not a {.file *.cff} file" - ) - } -} - -#' Error if file doesn't exists -#' @param x file to be evaluated -#' @noRd -stopifnotexists <- function(x) { - if (!file.exists(x)) { - cli::cli_abort("{.file {x}} doesn't exist") - } - return(invisible(NULL)) -} - #' Check if `x` has names #' @param x object to be evaluated #' @noRd diff --git a/R/cff.R b/R/cff.R index c2a3629d..751993c2 100644 --- a/R/cff.R +++ b/R/cff.R @@ -66,14 +66,12 @@ #' @export cff <- function(path, ...) { if (!missing(path)) { - if (is_cff_file(path)) { - lifecycle::deprecate_soft( - "1.0.0", "cff(path)", "cff_read_cff_citation()" - ) + src <- detect_x_source(path) + if (src == "cff_citation") { + lifecycle::deprecate_soft("1.0.0", "cff(path)", "cff_read_cff_citation()") return(cff_read_cff_citation(path)) } else { - lifecycle::deprecate_soft( - "1.0.0", "cff(path)", + lifecycle::deprecate_soft("1.0.0", "cff(path)", details = "Argument ignored." ) } diff --git a/R/cff_create.R b/R/cff_create.R index 14f04376..8ed8ac00 100644 --- a/R/cff_create.R +++ b/R/cff_create.R @@ -145,13 +145,13 @@ cff_create <- function(x, keys = list(), cff_version = "1.2.0", } # Build cff and return paths if any - result_parsed <- build_cff_and_paths( + result_paths <- build_cff_and_paths( x, cff_version, gh_keywords, dependencies, authors_roles, hint_source ) - desc_path <- result_parsed[["desc_path"]] - cffobjend <- result_parsed[["cffobjend"]] + desc_path <- result_paths[["desc_path"]] + cffobjend <- result_paths[["cffobjend"]] diff --git a/R/cff_gha_update.R b/R/cff_gha_update.R index 867f064f..2b516d77 100644 --- a/R/cff_gha_update.R +++ b/R/cff_gha_update.R @@ -45,7 +45,7 @@ cff_gha_update <- function(path = ".", newfile <- file.path(destdir, "update-citation-cff.yaml") - if (!file.exists(newfile) || isTRUE(overwrite)) { + if (!file_exist_abort(newfile) || isTRUE(overwrite)) { cli::cli_alert_success("Installing {.file {newfile}}") file.copy(system.file("yaml/update-citation-cff.yaml", package = "cffr"), @@ -61,7 +61,7 @@ cff_gha_update <- function(path = ".", ) } - if (file.exists(file.path(path, ".Rbuildignore"))) { + if (file_exist_abort(file.path(path, ".Rbuildignore"))) { ignore <- readLines(file.path(path, ".Rbuildignore")) # If not already diff --git a/R/cff_git_hook.R b/R/cff_git_hook.R index a2304d9e..dacf2052 100644 --- a/R/cff_git_hook.R +++ b/R/cff_git_hook.R @@ -89,7 +89,7 @@ cff_git_hook_remove <- function() { # nocov start hookfile <- file.path(".git", "hooks", "pre-commit") - if (file.exists(hookfile)) { + if (file_exist_abort(hookfile)) { cli::cli_alert_info( "Removing git pre-commit hook (was on {.path {hookfile}})" ) diff --git a/R/cff_read.R b/R/cff_read.R index 8a175c94..c71aea12 100644 --- a/R/cff_read.R +++ b/R/cff_read.R @@ -118,14 +118,7 @@ cff_read <- function(path, ...) 
{ ) } - if (!file.exists(path)) { - cli::cli_abort( - paste( - "{.file {path}} does not exist. ", - "Check the {.file {dirname(path)}} directory" - ) - ) - } + file_exist_abort(path, abort = TRUE) filetype <- detect_x_source(path) if (filetype == "dontknow") { @@ -151,14 +144,7 @@ cff_read <- function(path, ...) { #' @export #' @rdname cff_read cff_read_cff_citation <- function(path, ...) { - if (!file.exists(path)) { - cli::cli_abort( - paste( - "{.file {path}} does not exist. ", - "Check the {.file {dirname(path)}} directory" - ) - ) - } + file_exist_abort(path, abort = TRUE) cffobj <- yaml::read_yaml(path, ...) new_cff(cffobj) @@ -169,14 +155,7 @@ cff_read_cff_citation <- function(path, ...) { cff_read_description <- function(path, cff_version = "1.2.0", gh_keywords = TRUE, authors_roles = c("aut", "cre"), ...) { - if (!file.exists(path)) { - cli::cli_abort( - paste( - "{.file {path}} does not exist. ", - "Check the {.file {dirname(path)}} directory" - ) - ) - } + file_exist_abort(path, abort = TRUE) pkg <- desc::desc(path) pkg$coerce_authors_at_r() @@ -216,14 +195,7 @@ cff_read_description <- function(path, cff_version = "1.2.0", #' @export #' @rdname cff_read cff_read_citation <- function(path, meta = NULL, ...) { - if (!file.exists(path)) { - cli::cli_abort( - paste( - "{.file {path}} does not exist. ", - "Check the {.file {dirname(path)}} directory" - ) - ) - } + file_exist_abort(path, abort = TRUE) if (!any(is.null(meta), inherits(meta, "packageDescription"))) { # nolint start @@ -273,14 +245,7 @@ cff_read_citation <- function(path, meta = NULL, ...) { #' @family bibtex #' @rdname cff_read cff_read_bib <- function(path, encoding = "UTF-8", ...) { - if (!file.exists(path)) { - cli::cli_abort( - paste( - "{.file {path}} does not exist. ", - "Check the {.file {dirname(path)}} directory" - ) - ) - } + file_exist_abort(path, abort = TRUE) # nocov start if (!requireNamespace("bibtex", quietly = TRUE)) { @@ -304,7 +269,7 @@ cff_read_bib <- function(path, encoding = "UTF-8", ...) { #' Internal version of cff_read_citation, safe #' @noRd cff_safe_read_citation <- function(desc_path, cit_path) { - if (!file.exists(cit_path) || !file.exists(desc_path)) { + if (!file_exist_abort(cit_path) || !file_exist_abort(desc_path)) { return(NULL) } # Create meta diff --git a/R/cff_validate.R b/R/cff_validate.R index cbf73f30..f728ebec 100644 --- a/R/cff_validate.R +++ b/R/cff_validate.R @@ -1,11 +1,10 @@ #' Validate a `CITATION.cff` file or a [`cff`] object #' #' @description -#' Validate a `CITATION.cff` file or a [`cff`] object created with -#' [cff_create()] using the corresponding validation +#' Validate a `CITATION.cff` file or a [`cff`] object using the corresponding #' ```{r, echo=FALSE, results='asis'} #' -#' cat(paste0("\n", "[schema.json]", +#' cat(paste0(" [validation schema]", #' "(https://github.com/citation-file-format/", #' "citation-file-format/blob/main/schema.json).")) #' @@ -25,14 +24,20 @@ #' #' ``` #' -#' @return A message indicating the result of the validation and an invisible -#' value `TRUE/FALSE`. On error, the results would have an attribute -#' `"errors"` containing the error summary (see **Examples** and [attr()]). +#' @return +#' +#' A message indicating the result of the validation and an invisible value +#' `TRUE/FALSE`. On error, the results would have an attribute `"errors"` +#' containing the error summary (see **Examples** and [attr()]). 
#' #' @param x This is expected to be either a `cff` object created #' with [cff_create()] or the path to a `CITATION.cff` file to be validated. #' @inheritParams cff_write #' +#' @seealso +#' [jsonvalidate::json_validate()], that is the function that performs the +#' validation. +#' #' @examples #' \donttest{ #' # Full .cff example @@ -58,28 +63,20 @@ #' try(cff_validate(system.file("CITATION", package = "cffr"))) cff_validate <- function(x = "CITATION.cff", verbose = TRUE) { # If is a cff create the object - if (is_cff(x)) { - tmpfile <- tempfile(fileext = ".cff") - suppressMessages(yaml::write_yaml(x, tmpfile)) - path <- tmpfile - is_tmpfile <- TRUE + if (!is_cff(x)) { + # Check + abort_if_not_cff(x) + is_a <- paste0("{.file ", x, "}") + # nolint end + x <- cff_read_cff_citation(x) } else { - path <- x - is_tmpfile <- FALSE + is_a <- "This {.cls cff}" } - # Check - stopifnotexists(path) - stopifnotcff(path) - - # Read file - citfile <- yaml::read_yaml(path) + # Convert to list + citfile <- as.list(x) - # Clean up - if (is_tmpfile) file.remove(path) - - # Convert all elements to character # This prevent errors with jsonvalidate citfile <- rapply(citfile, function(x) as.character(x), how = "replace") @@ -103,37 +100,23 @@ cff_validate <- function(x = "CITATION.cff", verbose = TRUE) { get_errors$message, "}\n", collapse = "" ) - if (is_tmpfile) { - cli::cli_alert_danger( - paste0( - "Oops! This {.cls cff} has the following errors:\n", - ll - ) - ) - } else { - cli::cli_alert_danger( - paste0( - "Oops! {.file {x}} has the following errors:\n", - ll - ) - ) - } + cli::cli_alert_danger( + paste0("Oops! ", is_a, " has the following errors:\n", ll) + ) } + # Prepare output - r <- FALSE - attr(r, "errors") <- get_errors - return(invisible(r)) - } else { - if (verbose) { - cli::cat_rule("Validating cff", col = "cyan", line = 2) - if (is_tmpfile) { - cli::cli_alert_success("Congratulations! This {.cls cff} is valid") - } else { - cli::cli_alert_success("Congratulations! {.file {x}} is valid") - } - } - return(invisible(TRUE)) + attr(result, "errors") <- get_errors + return(invisible(result)) + } + + if (verbose) { + cli::cat_rule("Validating cff", col = "cyan", line = 2) + cli::cli_alert_success( + paste0("Congratulations! ", is_a, " is valid") + ) } + return(invisible(result)) } # Validate schema diff --git a/R/cff_write.R b/R/cff_write.R index ad46a02a..78743978 100644 --- a/R/cff_write.R +++ b/R/cff_write.R @@ -121,7 +121,7 @@ cff_write <- function(x, outfile = "CITATION.cff", keys = list(), } # Add CITATION.cff to .Rbuildignore - if (!is_cff(x) && x == getwd() && file.exists(".Rbuildignore")) { + if (!is_cff(x) && x == getwd() && file_exist_abort(".Rbuildignore")) { ignore <- readLines(".Rbuildignore") # If not already diff --git a/R/cff_write_misc.R b/R/cff_write_misc.R index ac957920..d6a68d7a 100644 --- a/R/cff_write_misc.R +++ b/R/cff_write_misc.R @@ -30,8 +30,8 @@ #' #' @details #' -#' When `x` is a `cff` object it would be converted to `bibentry` using -#' [as_bibentry()]. +#' When `x` is a `cff` object it would be converted to `Bibtex` using +#' [toBibtex.cff()]. #' #' For security reasons, if the file already exists the function would create #' a backup copy on the same directory. 
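A short sketch of the backup behaviour described in the hunk above. It assumes the in-development cffr (plus the suggested bibtex package used by `cff_read_bib_text()`) is available; the BibTeX entry and file name are illustrative only:

```r
library(cffr)

# A single illustrative BibTeX entry coerced to a cff reference list
ref <- cff_read_bib_text(
  "@misc{doe2024, title = {An illustrative title}, author = {Jane Doe}, year = {2024}}"
)

out <- file.path(tempdir(), "refs.bib")

cff_write_bib(ref, file = out) # first call writes refs.bib
cff_write_bib(ref, file = out) # refs.bib already exists, so a backup refs.bib.bk1 is copied first

list.files(tempdir(), pattern = "^refs\\.bib")
# Expected: "refs.bib" "refs.bib.bk1"
```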
@@ -84,7 +84,7 @@ cff_write_bib <- function(x, file = tempfile(fileext = ".bib"), append = FALSE, btex <- enc2utf8(btex) } - if (tools::file_ext(file) != "bib") file <- paste0(file, ".bib") + if (detect_x_source(file) != "bib") file <- paste0(file, ".bib") write_lines_msg(btex, file, verbose, append) return(invisible(NULL)) } @@ -128,40 +128,3 @@ cff_write_citation <- function(x, file = tempfile("CITATION_"), write_lines_msg(bentr, file, verbose, append) return(invisible(NULL)) } - - -write_lines_msg <- function(lines, file, verbose, append) { - # Check that the directory exists, if not create - dir <- dirname(path.expand(file)) - if (!dir.exists(dir)) { - if (verbose) cli::cli_alert_info("Creating directory {.path {dir}}") - dir.create(dir, recursive = TRUE) - } - - # If exists creates a backup - if (file.exists(file)) { - for (i in seq(1, 100)) { - f <- paste0(file, ".bk", i) - if (!file.exists(f)) break - } - - if (verbose) { - cli::cli_alert_info( - "Creating a backup of {.file {file}} in {.file {f}}" - ) - } - file.copy(file, f) - } - - - fh <- file(file, encoding = "UTF-8", open = ifelse(append, "a+", "w+")) - on.exit(if (isOpen(fh)) close(fh)) - if (verbose) { - cli::cli_alert_info("Writing {length(lines)} entr{?y/ies} ...") - } - - writeLines(lines, fh) - if (verbose) { - cli::cli_alert_success("Results written to {.file {file}}") - } -} diff --git a/R/deprecated.R b/R/deprecated.R index f468f415..080b4f03 100644 --- a/R/deprecated.R +++ b/R/deprecated.R @@ -101,7 +101,7 @@ cff_to_bibtex <- function(x, #' cff_read_bib(x2) #' } cff_from_bibtex <- function(x, encoding = "UTF-8", ...) { - if (length(x) == 1 && file.exists(x)) { + if (length(x) == 1 && file_exist_abort(x)) { if (requireNamespace("lifecycle", quietly = TRUE)) { lifecycle::deprecate_soft( "1.0.0", "cff_from_bibtex()", "cff_read_bib()" diff --git a/R/methods.R b/R/methods.R index 126a55b9..3c8613af 100644 --- a/R/methods.R +++ b/R/methods.R @@ -174,11 +174,18 @@ as.person.cff_pers <- function(x) { as.person.cff_pers_list <- function(x) { pers <- lapply(x, make_r_person) - # If not all extracted, malformed, return null + # If not all extracted inform if (!all(lengths(pers) > 0)) { - return(person()) + cli::cli_alert_info( + "Can't create {.cls person} for some elements of {.arg x}." + ) } - do.call(c, pers) + end <- do.call(c, pers) + if (any(duplicated(end))) { + cli::cli_alert_info("Removing duplicate {.cls person} objects.") + end <- end[!duplicated(end)] + } + end } diff --git a/R/utils-alerts.R b/R/utils-alerts.R new file mode 100644 index 00000000..6024e954 --- /dev/null +++ b/R/utils-alerts.R @@ -0,0 +1,88 @@ +#' Error if it is not a `cff` file or object +#' @param x file to be evaluated +#' @noRd +abort_if_not_cff <- function(x) { + if (is_cff(x)) { + return(invisible()) + } + + # x should be character at least + if (!inherits(x, "character")) { + cli::cli_abort( + "{.var x} is an object of class {.cls {class(x)}}, not {.cls cff}." + ) + } + + guess <- detect_x_source(x) + + if (guess != "cff_citation") { + cli::cli_abort( + "{.var x} is not a {.file *.cff} file." + ) + } +} + +#' Error if file doesn't exists +#' @param x file to be evaluated +#' @param abort Throw an error if does not exist +#' @noRd +file_exist_abort <- function(x, abort = FALSE) { + res <- file.exists(x) + + if (all(abort, isFALSE(res))) { + cli::cli_abort( + "{.file {x}} doesn't exist. 
Check the {.file {dirname(x)}} directory" + ) + } + return(invisible(res)) +} + +match_cff_arg <- function(arg, valid, for_msg, call = environment()) { + arg <- as.character(arg)[1] + valid <- as.character(valid) + + if (!arg %in% valid) { + cli::cli_abort( + "{.arg {for_msg}} should be {.or {.val {valid}}}, not {.val {arg}}.", + call = call + ) + } + + return(arg) +} + +write_lines_msg <- function(lines, file, verbose, append) { + # Check that the directory exists, if not create + dir <- dirname(path.expand(file)) + if (!dir.exists(dir)) { + if (verbose) cli::cli_alert_info("Creating directory {.path {dir}}") + dir.create(dir, recursive = TRUE) + } + + # If exists creates a backup + if (file_exist_abort(file)) { + for (i in seq(1, 100)) { + f <- paste0(file, ".bk", i) + if (!file_exist_abort(f)) break + } + + if (verbose) { + cli::cli_alert_info( + "Creating a backup of {.file {file}} in {.file {f}}" + ) + } + file.copy(file, f) + } + + + fh <- file(file, encoding = "UTF-8", open = ifelse(append, "a+", "w+")) + on.exit(if (isOpen(fh)) close(fh)) + if (verbose) { + cli::cli_alert_info("Writing {length(lines)} entr{?y/ies} ...") + } + + writeLines(lines, fh) + if (verbose) { + cli::cli_alert_success("Results written to {.file {file}}") + } +} diff --git a/R/utils-create.R b/R/utils-create.R index 951dbb11..dfc46c2e 100644 --- a/R/utils-create.R +++ b/R/utils-create.R @@ -1,4 +1,4 @@ -#' Merge the information of a parsed description with a parsed citation +#' Merge the information of a coerced description with a coerced citation #' @noRd merge_desc_cit <- function(cffobj, citobj) { # If no citobj then return null @@ -82,7 +82,7 @@ parse_dependencies <- function(desc_path, if (!is.character(desc_path)) { return(NULL) } - if (!file.exists(desc_path)) { + if (!file_exist_abort(desc_path)) { return(NULL) } # nocov end @@ -143,7 +143,7 @@ parse_dependencies <- function(desc_path, # urls from citation() vary due to auto = TRUE dfile <- system.file("DESCRIPTION", package = n$package) - if (file.exists(dfile)) { + if (file_exist_abort(dfile)) { pkg <- desc::desc(dfile) mod$url <- parse_desc_urls(pkg)$url mod$repository <- parse_desc_repository(pkg) diff --git a/R/utils-persons.R b/R/utils-persons.R index b929f302..68500ffc 100644 --- a/R/utils-persons.R +++ b/R/utils-persons.R @@ -260,11 +260,11 @@ bibtex_pers_first_von_last <- function(x) { return(end_list) } -validate_cff_person_fields <- function(parsed_person) { +validate_cff_person_fields <- function(person_cff) { # Entity of person # Guess entity or person - is_entity <- as.character("name" %in% names(parsed_person)) + is_entity <- as.character("name" %in% names(person_cff)) # Keep only valid tags - Would depend on entity or person definition <- switch(is_entity, @@ -272,12 +272,12 @@ validate_cff_person_fields <- function(parsed_person) { cff_schema_definitions_person() ) - parsed_person <- parsed_person[names(parsed_person) %in% definition] + person_cff <- person_cff[names(person_cff) %in% definition] # Duplicates removed - parsed_person <- parsed_person[!duplicated(names(parsed_person))] + person_cff <- person_cff[!duplicated(names(person_cff))] - parsed_person + person_cff } guess_hint <- function(person) { diff --git a/R/utils-read-description.R b/R/utils-read-description.R index 798dd6ac..e57ad475 100644 --- a/R/utils-read-description.R +++ b/R/utils-read-description.R @@ -1,4 +1,4 @@ -# Functions to parse field on DESCRIPTION file +# Functions to convert fields on DESCRIPTION file #' Mapped to Description #' @noRd diff --git a/R/utils.R 
b/R/utils.R index f69a0201..5fdbd5f8 100644 --- a/R/utils.R +++ b/R/utils.R @@ -248,32 +248,18 @@ detect_x_source <- function(x) { return("dontknow") } -match_cff_arg <- function(arg, valid, for_msg, call = environment()) { - arg <- as.character(arg)[1] - valid <- as.character(valid) - - if (!arg %in% valid) { - cli::cli_abort( - "{.arg {for_msg}} should be {.val {valid}}, not {.val {arg}}.", - call = call - ) - } - - return(arg) -} - file_path_or_null <- function(x) { x_c <- clean_str(x) if (is.null(x_c)) { return(x) } - if (file.exists(x)) { + if (file_exist_abort(x)) { return(x) } return(NULL) } -#' Parse and clean data from DESCRIPTION to create metadata +#' Coerce and clean data from DESCRIPTION to create metadata #' @noRd clean_package_meta <- function(meta) { if (!inherits(meta, "packageDescription")) { diff --git a/README.Rmd b/README.Rmd index 71cccf60..69870225 100644 --- a/README.Rmd +++ b/README.Rmd @@ -36,7 +36,7 @@ bytes](https://img.shields.io/github/languages/code-size/ropensci/cffr) -**cffr** provides utilities to generate, parse, modify and validate +**cffr** provides utilities to generate, coerce, modify and validate `CITATION.cff` files automatically for **R** packages, as well as tools and examples for working with .cff more generally. diff --git a/README.md b/README.md index 582a3f42..27bc9086 100644 --- a/README.md +++ b/README.md @@ -22,7 +22,7 @@ bytes](https://img.shields.io/github/languages/code-size/ropensci/cffr) -**cffr** provides utilities to generate, parse, modify and validate +**cffr** provides utilities to generate, coerce, modify and validate `CITATION.cff` files automatically for **R** packages, as well as tools and examples for working with .cff more generally. @@ -72,7 +72,7 @@ file and the `CITATION` file (if present) of your package. Note that **cffr** works best if your package pass `R CMD check/devtools::check()`. -As per 2024-03-05 there are at least 290 repos on GitHub using **cffr**. +As per 2024-03-07 there are at least 290 repos on GitHub using **cffr**. [Check them out here](https://github.com/search?q=cffr%20path%3A**%2FCITATION.cff&type=code). @@ -561,6 +561,26 @@ test <- cff_create("rmarkdown") - family-names: Chirico. 
given-names: Michael year: '2024' + - type: software + title: dygraphs + abstract: 'dygraphs: Interface to ''Dygraphs'' Interactive Time Series Charting + Library' + notes: Suggests + url: https://github.com/rstudio/dygraphs + repository: https://CRAN.R-project.org/package=dygraphs + authors: + - family-names: Vanderkam + given-names: Dan + website: http://dygraphs.com/ + - family-names: Allaire + given-names: JJ + - family-names: Owen + given-names: Jonathan + - family-names: Gromer + given-names: Daniel + - family-names: Thieurmel + given-names: Benoit + year: '2024' - type: software title: fs abstract: 'fs: Cross-Platform File System Operations Based on ''libuv''' @@ -577,6 +597,26 @@ test <- cff_create("rmarkdown") given-names: Gábor email: csardi.gabor@gmail.com year: '2024' + - type: software + title: rsconnect + abstract: 'rsconnect: Deploy Docs, Apps, and APIs to ''Posit Connect'', ''shinyapps.io'', + and ''RPubs''' + notes: Suggests + url: https://rstudio.github.io/rsconnect/ + repository: https://CRAN.R-project.org/package=rsconnect + authors: + - family-names: Atkins + given-names: Aron + email: aron@posit.co + - family-names: Allen + given-names: Toph + - family-names: Wickham + given-names: Hadley + - family-names: McPherson + given-names: Jonathan + - family-names: Allaire + given-names: JJ + year: '2024' - type: software title: downlit abstract: 'downlit: Syntax Highlighting and Automatic Linking' @@ -589,6 +629,19 @@ test <- cff_create("rmarkdown") email: hadley@posit.co year: '2024' version: '>= 0.4.0' + - type: software + title: katex + abstract: 'katex: Rendering Math to HTML, ''MathML'', or R-Documentation Format' + notes: Suggests + url: https://docs.ropensci.org/katex/ + repository: https://CRAN.R-project.org/package=katex + authors: + - family-names: Ooms + given-names: Jeroen + email: jeroen@berkeley.edu + orcid: https://orcid.org/0000-0002-4035-0289 + year: '2024' + version: '>= 1.4.0' - type: software title: sass abstract: 'sass: Syntactically Awesome Style Sheets (''Sass'')' @@ -699,6 +752,18 @@ test <- cff_create("rmarkdown") given-names: Davis email: davis@posit.co year: '2024' + - type: software + title: cleanrmd + abstract: 'cleanrmd: Clean Class-Less ''R Markdown'' HTML Documents' + notes: Suggests + url: https://pkg.garrickadenbuie.com/cleanrmd/ + repository: https://CRAN.R-project.org/package=cleanrmd + authors: + - family-names: Aden-Buie + given-names: Garrick + email: garrick@adenbuie.com + orcid: https://orcid.org/0000-0002-7111-0077 + year: '2024' - type: software title: withr abstract: 'withr: Run Code ''With'' Temporarily Modified Global State' diff --git a/codemeta.json b/codemeta.json index f26f97d2..3fe94e44 100644 --- a/codemeta.json +++ b/codemeta.json @@ -14,7 +14,7 @@ "name": "R", "url": "https://r-project.org" }, - "runtimePlatform": "R version 4.3.2 (2023-10-31)", + "runtimePlatform": "R version 4.3.3 (2024-02-29 ucrt)", "provider": { "@id": "https://cran.r-project.org", "@type": "Organization", @@ -200,7 +200,7 @@ }, "isPartOf": "https://ropensci.org", "keywords": ["attribution", "citation", "credit", "citation-files", "cff", "metadata", "r", "r-package", "citation-file-format", "rstats", "ropensci", "cran"], - "fileSize": "963.055KB", + "fileSize": "949.454KB", "citation": [ { "@type": "ScholarlyArticle", diff --git a/data-raw/test-2as_bibentry.R b/data-raw/test-2as_bibentry.R new file mode 100644 index 00000000..4c26b099 --- /dev/null +++ b/data-raw/test-2as_bibentry.R @@ -0,0 +1,510 @@ +# Test Bibtex ---- +test_that("Article to bibtex", 
{ + bib <- bibentry("Article", + key = "knuth:1984", + author = person("R Core Team"), + title = "Literate Programming", + journal = "The Computer Journal", + year = "1984", + # Optional + volume = "27", + number = 2, + pages = "97--111", + month = "January", + keywords = "Some, simple, keywords" + ) + expect_snapshot(toBibtex(bib)) + x <- as_cff(bib) + bib <- as_bibentry(x) + expect_snapshot(toBibtex(bib)) +}) + + +test_that("Book to bibtex", { + bib <- bibentry("Book", + key = "latex:companion", + author = "Frank Mittelbach and Michel Gossens + and Johannes Braams and David Carlisle + and Chris Rowley", + editor = "{Barnes and Noble}", + title = "The LaTeX Companion", + publisher = "Addison-Wesley Professional", + year = "2004", + # Optional + volume = "3", + number = 7, + series = "The LateX Books", + address = "Santa Monica", + edition = "Fourth", + month = "August", + note = "Example modified for testing purposes", + keywords = c("Two, keyword") + ) + + expect_snapshot(toBibtex(bib)) + bibparsed <- as_cff(bib) + bib <- as_bibentry(bibparsed) + expect_snapshot(toBibtex(bib)) +}) + + +test_that("Booklet to bibtex", { + bib <- bibentry("Booklet", + key = "Mustermann2016", + title = "Java Booklet", + # Optional + author = "Max Mustermann", + howpublished = "Internet", + address = "Stuttgart", + month = "feb", + year = "2016", + note = "Example modified from Jabref", + keywords = "java" + ) + + expect_snapshot(toBibtex(bib)) + bibparsed <- as_cff(bib) + bib <- as_bibentry(bibparsed) + expect_snapshot(toBibtex(bib)) +}) + +test_that("InBook to bibtex with pages", { + bib <- bibentry("InBook", + year = "2003", + month = "oct", + pages = "175--196", + title = "Architectural Mismatch Tolerance", + chapter = "Tolerances and Other Notes", + author = "R. de Lemos and C. Gacek and A. Romanovsky", + URL = "http://www.cs.kent.ac.uk/pubs/2003/1773", + publication_type = "inbook", + submission_id = "12884_1074884456", + ISBN = "3-540-40727-8", + editor = "A. Lalanda", + edition = "Fifth", + publisher = "Springer", + volume = "2677", + number = "234", + address = "Lozoya", + series = "Lecture Notes in Computer Science", + type = "Architecting Dependable Systems", + ) + + expect_snapshot(toBibtex(bib)) + bibparsed <- as_cff(bib) + bib <- as_bibentry(bibparsed) + expect_snapshot(toBibtex(bib)) +}) + +test_that("InCollection to bibtex", { + bib <- bibentry("InCollection", + author = "Klaus Abels", + title = "Who Gives a Damn about Minimizers in Questions?", + booktitle = "Proceedings from Semantics and Linguistic Theory {XIII}", + publisher = "Cornell University", + year = 2003, + editor = "Robert B. 
Young and Yuping Zhou", + pages = "1--18", + address = "Ithaca, New York", + topic = "interrogatives;nl-semantics;polarity;" + ) + + expect_snapshot(toBibtex(bib)) + bibparsed <- as_cff(bib) + bib <- as_bibentry(bibparsed) + expect_snapshot(toBibtex(bib)) +}) + + +test_that("InProceedings to bibtex", { + bib <- bibentry("InProceedings", + author = "John Aberdeen and Samuel Bayer and Sasha Caskey and Laurie + Damianos and Alan Goldschen and Lynette Hirschman and + Dan Loehr and Hugo Trapper", + title = "Implementing Practical Dialogue Systems with the + {DARPA} Communicator Architecture", + booktitle = "Proceedings of the {IJCAI}-99 Workshop on + Knowledge and Reasoning in Practical Dialogue Systems", + year = 1999, + editor = "Jan Alexandersson", + pages = "81--86", + series = "A Series", + organization = "IJCAI", + publisher = "International Joint Conference on Artificial Intelligence", + address = "Murray Hill, New Jersey", + ) + + expect_snapshot(toBibtex(bib)) + bibparsed <- as_cff(bib) + bib <- as_bibentry(bibparsed) + expect_snapshot(toBibtex(bib)) + + # If we remove collection title use conference + bibparsed[[1]]$`collection-title` <- NULL + bibparsed[[1]]$conference$name <- "I Am a conference" + bib <- as_bibentry(bibparsed) + expect_snapshot(toBibtex(bib)) +}) + + +test_that("Manual to bibtex", { + bib <- bibentry("Manual", + author = "Gerhard Allwein and Dave Barker-Plummer and Jon Barwise + and John Etchemendy", + title = "{LPL} Software Manual", + publisher = "{CSLI} Publications", + year = 1999, + address = "Stanford, California", + howpublished = "CD-Rom", + ) + + expect_snapshot(toBibtex(bib)) + bibparsed <- as_cff(bib) + bib <- as_bibentry(bibparsed) + expect_snapshot(toBibtex(bib)) +}) + + +test_that("MastersThesis to bibtex", { + bib <- bibentry("MastersThesis", + author = "Murat Bayraktar", + title = "Computer-Aided Analysis of {E}nglish Punctuation on a + Parsed Corpus: The Special Case of Comma", + school = "Department of Computer Engineering and Information + Science, Bilkent University, Turkey", + address = "Ankara, Turkey", + year = 1996, + note = "Forthcoming", + ) + + expect_snapshot(toBibtex(bib)) + bibparsed <- as_cff(bib) + bib <- as_bibentry(bibparsed) + expect_snapshot(toBibtex(bib)) +}) + + +test_that("PhdThesis to bibtex", { + bib <- bibentry("PhdThesis", + author = "David I. Beaver", + title = "Presupposition and Assertion in Dynamic Semantics", + school = "Centre for Cognitive Science, University of Edinburgh", + year = 1995, + type = "Ph.D. 
Dissertation", + address = "Edinburgh", + ) + + expect_snapshot(toBibtex(bib)) + bibparsed <- as_cff(bib) + bib <- as_bibentry(bibparsed) + expect_snapshot(toBibtex(bib)) +}) + + +test_that("Proceedings to bibtex", { + bib <- bibentry("Proceedings", + title = "An Abductive Framework for Negation in Disjunctive + Logic Programming", + organization = "{JELIA}'96", + year = 1996, + editor = "Jose Julio Alferes and Luis Moniz Pereira and Eva Orlowska", + publisher = "Springer-Verlag", + address = "Berlin", + missinginfo = "pages", + ) + + expect_snapshot(toBibtex(bib)) + bibparsed <- as_cff(bib) + bib <- as_bibentry(bibparsed) + expect_snapshot(toBibtex(bib)) +}) + +test_that("TechReport to bibtex", { + bib <- bibentry("TechReport", + author = person("John M.", "Aronis", + comment = c(affiliation = "rOpenSci") + ), + title = "Implementing Inheritance on the Connection Machine", + institution = "Intelligent Systems Program, University of Pittsburgh", + number = "ISP 93-1", + year = 1993, + address = "Pittsburgh, PA 15260", + ) + + expect_snapshot(toBibtex(bib)) + bibparsed <- as_cff(bib) + bib <- as_bibentry(bibparsed) + expect_snapshot(toBibtex(bib)) + + # Fallback when missing institution + bibparsed[[1]]$institution <- NULL + + bib <- as_bibentry(bibparsed) + expect_snapshot(toBibtex(bib)) +}) + +test_that("Unpublished to bibtex", { + bib <- bibentry("Unpublished", + author = "John M. Aronis and Foster J. Provost", + title = "Efficiently Constructing Relational Features from Background", + year = 1959, + note = paste0( + "Unpublished MS, Computer Science Department, ", + "University of Pittsburgh." + ), + missinginfo = "Date is guess.", + ) + + expect_snapshot(toBibtex(bib)) + bibparsed <- as_cff(bib) + bib <- as_bibentry(bibparsed) + expect_snapshot(toBibtex(bib)) + + # With custom note + bibparsed[[1]]$notes <- NULL + bib <- as_bibentry(bibparsed) + expect_snapshot(toBibtex(bib)) +}) + +test_that("Test BibLateX entry", { + bib <- bibentry("Article", + author = "M. A. Kastenholz, and Philippe H. Hünenbergerb", + title = "Computation of methodology hyphen independent ionic solvation + free energies from molecular simulations", + journal = "J. Chem. Phys.", + year = 2006, + note = "Example modified for testing purposes", + pages = "55--65", + + # Additional BibLatex Fields + date = "2006-03-15", + file = "a_file.pdf", + issuetitle = "Semantic {3D} Media and Content", + translator = "Wicksteed, P. H. 
and {The translator factory}", + urldate = "2006-10-01", + pagetotal = 528, + abstract = "The computation of ionic solvation free energies from + atomistic simulations is a surprisingly difficult problem that + has found no satisfactory solution for more than 15 years.", + doi = "10.1063/1.2172593", + isbn = "0-816-52066-6", + issn = "0097-8493", + url = "http://www.ctan.org" + ) + expect_snapshot(toBibtex(bib)) + x <- as_cff(bib) + + + parsed <- as_bibentry(x) + expect_snapshot(toBibtex(parsed)) +}) + + +# Other testers ---- + +test_that("particle names", { + bib <- bibentry("Book", + title = "A Handbook for Scholars", + author = person("Mary-Claire", "van Leunen"), + year = 1979, + publisher = "Knopf" + ) + + + bibparsed <- as_cff(bib) + bibparsed[[1]]$authors <- as_cff_person( + "van Leunen, Mary-Claire and Davis, Jr., Sammy" + ) + + cffobj <- cff_create(cff(), + keys = list(references = bibparsed) + ) + + expect_true(cff_validate(cffobj, verbose = FALSE)) + + expect_snapshot(bibparsed) + + bib <- as_bibentry(bibparsed) + expect_snapshot(toBibtex(bib)) +}) + +test_that("From plain cff with a citation", { + s <- cff() + s <- cff_create(s) + + acit <- bibentry( + bibtype = "misc", title = "title", year = 1999, + author = "John Doe", + month = 3 + ) + + s$`preferred-citation` <- as_cff(acit)[[1]] + s$`preferred-citation`$editors <- as_cff_person("A name") + + bib <- as_bibentry(s) + expect_snapshot(toBibtex(bib)) +}) + +test_that("From plain cff", { + expect_silent(bib <- as_bibentry(cff())) + expect_snapshot(toBibtex(bib)) + + + expect_snapshot(as_bibentry(cff(), what = "anda"), error = TRUE) +}) + +test_that("From file", { + file <- system.file("examples/CITATION_complete.cff", + package = "cffr" + ) + + bib <- as_bibentry(file) + expect_snapshot(toBibtex(bib)) + + expect_snapshot(as_bibentry("anunkonwpackage"), error = TRUE) +}) + + +test_that("Test anonymous", { + bib <- bibentry("Booklet", + title = "A booklet" + ) + + + expect_silent(back <- as_bibentry(as_cff(bib))) + expect_snapshot(toBibtex(back)) + + + bib <- bibentry("manual", + title = "A manual" + ) + + + expect_silent(back <- as_bibentry(as_cff(bib))) + expect_snapshot(toBibtex(back)) + + bib <- bibentry("misc", + title = "A misc" + ) + + + expect_silent(back <- as_bibentry(as_cff(bib))) + expect_snapshot(toBibtex(back)) + + bib <- bibentry("proceedings", + title = "proceedings", + year = 1984 + ) + + + expect_silent(back <- as_bibentry(as_cff(bib))) + expect_snapshot(toBibtex(back)) +}) + +test_that("Fallback month", { + bib <- bibentry("Article", + title = "An Article", + author = "John Doe", + journal = "El Adelantado de Segovia", + year = "1678", + date = "1678-04-23" + ) + + expect_snapshot(toBibtex(bib)) + x <- as_cff(bib) + + # Delete here the month + x$month <- NULL + + bibback <- as_bibentry(x) + expect_snapshot(toBibtex(bibback)) +}) + + +test_that("Test Fallback year", { + x <- cff() + + expect_silent(msg <- as_bibentry(x)) + + expect_snapshot(toBibtex(msg)) + + + x$`date-released` <- "2020-01-01" + + expect_true(cff_validate(x, verbose = FALSE)) + + parsed <- as_bibentry(x) + + expect_snapshot(toBibtex(parsed)) +}) + +test_that("Errors", { + expect_silent(b <- as_bibentry("testthat")) + expect_s3_class(b, "bibentry") + expect_error(as_bibentry("testthat", what = "aa")) +}) + +test_that("From package", { + skip_if_not_installed("rmarkdown") + + base <- as_bibentry("rmarkdown") + + expect_s3_class(base, "bibentry") + + expect_length(base, 1) + + refs <- as_bibentry("rmarkdown", what = "references") + 
expect_s3_class(refs, "bibentry") + + expect_gte(length(refs), 1) + + all <- as_bibentry("rmarkdown", what = "all") + expect_s3_class(all, "bibentry") + + expect_length(all, length(base) + length(refs)) +}) + +test_that("NULL references", { + basic <- cff() + + expect_identical( + as_bibentry(basic, what = "references"), + bibentry() + ) + + # Test all + expect_silent(l <- as_bibentry(basic, what = "all")) + expect_length(l, 1) +}) + + +test_that("From CITATION.cff", { + p <- system.file("examples/smith-et-al.cff", package = "cffr") + + base <- as_bibentry(p) + + expect_s3_class(base, "bibentry") + + expect_length(base, 1) +}) + +test_that("Corrupt entry", { + bib <- bibentry("Article", + key = "knuth:1984", + author = person("R Core Team"), + title = "Literate Programming", + journal = "The Computer Journal", + year = "1984", + # Optional + volume = "27", + number = 2, + pages = "97--111", + month = "January", + keywords = "Some, simple, keywords" + ) + x <- as_cff(bib)[[1]] + x$year <- NULL + x$journal <- NULL + expect_snapshot(n <- as_bibentry(x)) + expect_identical(bibentry(), bibentry()) +}) diff --git a/data-raw/test2-as_cff_reference.R b/data-raw/test2-as_cff_reference.R new file mode 100644 index 00000000..d3551a1f --- /dev/null +++ b/data-raw/test2-as_cff_reference.R @@ -0,0 +1,235 @@ +# Underlying tests in as_bibentry +# Corner cases here + +test_that("citations with installed packages", { + installed <- as.character(installed.packages()[, 1]) + inst <- c("base", "jsonlite", "rmarkdown") + for (i in seq_len(length(inst))) { + if (inst[i] %in% installed) { + desc <- cff_create(inst[i]) + expect_true(length(desc$`preferred-citation`) > 1) + expect_true(cff_validate(desc, verbose = FALSE)) + } + } +}) + +test_that("Test full with CITATION and (option = author)", { + # Needs an installed package + desc_path <- system.file("examples/DESCRIPTION_rgeos", package = "cffr") + cit_path <- system.file("examples/CITATION_auto", package = "cffr") + parsed <- cff_safe_read_citation(desc_path, cit_path) + expect_s3_class(parsed, "cff") + + # Create cff + cffobj <- cff_create(desc_path, keys = list( + references = parsed + )) + + expect_s3_class(cffobj, "cff") + expect_snapshot(cffobj) + expect_true(cff_validate(cffobj, verbose = FALSE)) +}) + + +test_that("Parsed several citations", { + # Needs an installed package + desc_path <- system.file("examples/DESCRIPTION_rgeos", package = "cffr") + cit_path <- system.file("examples/CITATION_auto", package = "cffr") + citobj <- cff_safe_read_citation(desc_path, cit_path) + expect_s3_class(citobj, c("cff_ref_list", "cff"), exact = TRUE) + + expect_snapshot(citobj) + expect_length(citobj, 3) +}) + + +test_that("Add wrong field to citation", { + bib <- bibentry( + bibtype = "Manual", + title = "favoritefood is not valid on cff schema", + author = "Jane Smith", + favoritefood = "bananas", + type = "I should be removed" + ) + + bibparsed <- as_cff(bib) + + expect_s3_class(bibparsed, "cff") + + cffobj <- cff_create(cff(), + keys = list( + references = bibparsed + ) + ) + + expect_snapshot(cffobj) + expect_true(cff_validate(cffobj, verbose = FALSE)) +}) + +test_that("Fix wrong orcid", { + bib <- bibentry( + bibtype = "Manual", + title = "Wrong orcid fixed by cffr", + author = person("Jane", + "Smith", + comment = c( + ORCID = + "http://orcid.org/0000-0000-0000-306X" + ) + ) + ) + + bibparsed <- as_cff(bib) + + expect_s3_class(bibparsed, "cff") + + cffobj <- cff_create(cff(), + keys = list(references = bibparsed) + ) + + expect_snapshot(cffobj) + 
expect_true(cff_validate(cffobj, verbose = FALSE)) +}) + +test_that("Several identifiers and duplicates", { + bib <- bibentry( + bibtype = "Manual", + title = "A Language and Environment for Statistical Computing", + year = "2022", + year = "2023", + author = person("R Core Team"), + version = NULL, + error = "", + url = "https://www.R-project.org/", + url = "https://google.com/", + doi = "10.5281/zenodo.5366600", + doi = "10.5281/zenodo.5366601", + doi = "10.5281/zenodo.5366602", + identifiers = "a,b" + ) + + bibparsed <- as_cff(bib) + + expect_s3_class(bibparsed, "cff") + + cffobj <- cff_create(cff(), + keys = list( + references = bibparsed + ) + ) + + expect_snapshot(cffobj) + expect_true(cff_validate(cffobj, verbose = FALSE)) +}) + +test_that("Test keywords and urls", { + bib <- bibentry( + bibtype = "Manual", + title = "A Language and Environment for Statistical Computing", + year = "2022", + author = person("R Core Team"), + url = "https://www.R-project.org/", + url = "https://google.com/", + keywords = "Some, random keywords, in, here, here" + ) + + bibparsed <- as_cff(bib) + + expect_s3_class(bibparsed, "cff") + + cffobj <- cff_create(cff(), + keys = list( + references = bibparsed + ) + ) + + expect_snapshot(cffobj) + expect_true(cff_validate(cffobj, verbose = FALSE)) +}) + +test_that("Coerce persons on CITATION", { + bib <- bibentry("Manual", + title = "A Language and Environment for Statistical Computing", + year = "2021", + author = person("R Core Team"), + contact = "A name and A contact", + conference = person("A", "conference"), + "database-provider" = person("Database", "provider"), + editors = "A editor and {Ben and Jerry}", + "editors-series" = "An {editor series} and Another", + "institution" = person("A", "institution"), + "address" = person("A", "location"), + "publisher" = person("A", "publisher"), + "recipients" = "A recipient", + "senders" = "{A Sender} and Another Sender", + "translators" = "Translator one and Translator two" + ) + + bibparsed <- as_cff(bib) + expect_snapshot(bibparsed) + + cffobj <- cff_create(cff(), + keys = list(references = bibparsed) + ) + + expect_true(cff_validate(cffobj, verbose = FALSE)) +}) + +test_that("Test inputs", { + # Remove type + + bib <- bibentry("Book", + title = "Test", + author = "Billy Jean", + year = "2021", + publisher = "Random House", + type = "RANDOM" + ) + + + bibparsed <- as_cff(bib) + expect_snapshot(bibparsed) + + cffobj <- cff_create(cff(), + keys = list(references = bibparsed) + ) + + expect_true(cff_validate(cffobj, verbose = FALSE)) +}) + +test_that("Fallback date", { + bib <- bibentry("Misc", + title = "Test", + author = "Billy Jean", + date = "2050-01-12", + urldate = "2099-02-02", + publisher = "Random House", + type = "RANDOM" + ) + + init_cff <- as_cff(bib) + + expect_snapshot(init_cff) +}) + +test_that("Duplicates", { + bib <- bibentry("Misc", + title = "Test", + author = "Billy Jean", + date = "2050-01-12", + urldate = "2099-02-02", + publisher = "Random House", + type = "RANDOM" + ) + bib2 <- bibentry("Manual", + title = "Test", + author = "Billy Jean", + date = "2050-01-12", + urldate = "2099-02-02", + publisher = "Random House", + type = "RANDOM" + ) + bibend <- c(rep(bib, 3), bib2) + expect_snapshot(uniq <- as_cff(bibend)) + expect_length(uniq, 2) +}) diff --git a/data/cran_to_spdx.rda b/data/cran_to_spdx.rda index 33fae2994ac0070f09336d3d8c403ec099798a1f..7a4a37397cc9557c55af6dabb875bd3b8605aab7 100644 GIT binary patch literal 916 zcmV;F18e*riwFP!000002F+JXPuoBccH=-unm|gSm3lH&4Vna`W6qYoKWTU7l 
z6%~ibghjRFILLPB0My-_$F1vgh;6h(a+a}}kDKNl%S?LaGCmOH(HnXZ?kH84`? zaU^NmM~9lRlR~SV8+-f-X{IwqUVu_`lSj4f!$VCcTb|35yRDs~y+`x*9&vlu1vOTs zniqoR1M&lB>M@LZ#}Az#bjE;U8w8VB+&CVdXgV0X_AC(Nk9CGzSI4@MK1y^fX7HGL zJS3-3!AsE2p1cO3cKi{XLNM%Tdo%z7=P`~~jWGsLAtcLXh+y9&&s|9*pr>*pk@+Lp zg44+aXL2yRbeTkv1XH&waKJ`Ug9E|R8|WiDbZ(KBm<+T|82ZlTEQ}0cf3?0BBHyzk z*>;7zs@<$=>)q6Jo0dkodzR@}qBVA}tN6L%b#B{{XJRI$)w8K>qxYRN6+7li+LJ@(Q;t0mJHk{;2H~31B_8U z=T6>fgSxZIU=k+{YL8*c(S7DZm`s~D+4EURr0jYm+H44zxg0i3NfOst@U?b8V`S-B zdZ<|mk4eU+Im0y;7F3+oe^2b;rJ#LgO}{y=YZqKmxmKfXqKH9f!pj0ie%h#3mi&sv zHKSN$#*#%UpQ&OiSH)KT0IFx6>r9*wT?w;ZA^DAmJH8!Y>bYMz(BQ!*6!d{><0ecz zNyaMM)>B0P3y5rFOEKKBfexTJa)BYi7T^i!9zRc>o3)f6)K`|B!TNe+NIOUueH)-|#>H0ssI3&;$PH zMJcLQXdxn$LqY0#n*AnjWXMA%O-!8VroodWVq>8hVWZpyrJl8ess_0%@QOhK3*w7>x`OkO2~jFe$2d zXwawXnjVu)lzN&O4WcqVOllbqQxUPLFe>dz=&r?fZ9@1WKjajLMimfmJ2mUYPPPXx zj*Ybg1&%$?qNxPUhn5dz<$F-rN4=W12TeQiUlnt8<0b6Yj7qp-b#>kRV}V z`Fj%x=Ccapm4j`8K(v)wkqAVgQwwED3edK<)|qCdvuV!(PmL8wD1-^BC2K`h&j3QP zL0u36=!%xXc0(x;6=KLKJiY@Gylf7R8cZa*UN&lz^7{|Bd%yVmS_cO2<5+P%UUOvX zSz@tbnf*NMPJ>Nh!Uq@-^UcwV*1cylE@5sg3T+_5-MXcG?wWWD;2vA3o z&8+4_l-WR}LCwk=!sx_DBpAAMWvtJbNN+NfJb=(2mY@It diff --git a/inst/WORDLIST b/inst/WORDLIST index b58c68cd..7b966324 100644 --- a/inst/WORDLIST +++ b/inst/WORDLIST @@ -76,7 +76,6 @@ inproceedings json jsonvalidate param -parsers plaintext pre rOpenSci diff --git a/inst/schemaorg.json b/inst/schemaorg.json index 015b552c..d5eb18c9 100644 --- a/inst/schemaorg.json +++ b/inst/schemaorg.json @@ -26,6 +26,6 @@ "name": "Comprehensive R Archive Network (CRAN)", "url": "https://cran.r-project.org" }, - "runtimePlatform": "R version 4.3.2 (2023-10-31)", + "runtimePlatform": "R version 4.3.3 (2024-02-29 ucrt)", "version": "0.99.0.9000" } diff --git a/man/cff_validate.Rd b/man/cff_validate.Rd index 6b6079ff..490d5198 100644 --- a/man/cff_validate.Rd +++ b/man/cff_validate.Rd @@ -14,14 +14,12 @@ with \code{\link[=cff_create]{cff_create()}} or the path to a \code{CITATION.cff informative messages.} } \value{ -A message indicating the result of the validation and an invisible -value \code{TRUE/FALSE}. On error, the results would have an attribute -\code{"errors"} containing the error summary (see \strong{Examples} and \code{\link[=attr]{attr()}}). +A message indicating the result of the validation and an invisible value +\code{TRUE/FALSE}. On error, the results would have an attribute \code{"errors"} +containing the error summary (see \strong{Examples} and \code{\link[=attr]{attr()}}). } \description{ -Validate a \code{CITATION.cff} file or a \code{\link{cff}} object created with -\code{\link[=cff_create]{cff_create()}} using the corresponding validation -\href{https://github.com/citation-file-format/citation-file-format/blob/main/schema.json}{schema.json}. +Validate a \code{CITATION.cff} file or a \code{\link{cff}} object using the corresponding \href{https://github.com/citation-file-format/citation-file-format/blob/main/schema.json}{validation schema}. } \examples{ \donttest{ @@ -50,6 +48,9 @@ try(cff_validate(system.file("CITATION", package = "cffr"))) \seealso{ \href{https://github.com/citation-file-format/citation-file-format/blob/main/schema-guide.md}{Guide to Citation File Format schema version 1.2.0}. +\code{\link[jsonvalidate:json_validate]{jsonvalidate::json_validate()}}, that is the function that performs the +validation. 
+ Other core functions of \CRANpkg{cffr}: \code{\link{cff}()}, \code{\link{cff_create}()}, diff --git a/man/cff_write_misc.Rd b/man/cff_write_misc.Rd index d197ebd5..54ccd473 100644 --- a/man/cff_write_misc.Rd +++ b/man/cff_write_misc.Rd @@ -52,8 +52,8 @@ Section 1.9 CITATION files of \emph{Writing R Extensions} (R Core Team 2023). } } \details{ -When \code{x} is a \code{cff} object it would be converted to \code{bibentry} using -\code{\link[=as_bibentry]{as_bibentry()}}. +When \code{x} is a \code{cff} object it would be converted to \code{Bibtex} using +\code{\link[=toBibtex.cff]{toBibtex.cff()}}. For security reasons, if the file already exists the function would create a backup copy on the same directory. diff --git a/tests/testthat/_snaps/as_bibentry.md b/tests/testthat/_snaps/as_bibentry.md index 170c208f..5c8bdf59 100644 --- a/tests/testthat/_snaps/as_bibentry.md +++ b/tests/testthat/_snaps/as_bibentry.md @@ -1,700 +1,74 @@ -# Article to bibtex +# as_bibentry default Code - toBibtex(bib) - Output - @Article{knuth:1984, - author = {{R Core Team}}, - title = {Literate Programming}, - journal = {The Computer Journal}, - year = {1984}, - volume = {27}, - number = {2}, - pages = {97--111}, - month = {January}, - keywords = {Some, simple, keywords}, - } - ---- - - Code - toBibtex(bib) - Output - @Article{rcoreteam:1984, - title = {Literate Programming}, - author = {{R Core Team}}, - year = {1984}, - month = {jan}, - journal = {The Computer Journal}, - volume = {27}, - number = {2}, - pages = {97--111}, - keywords = {Some,simple,keywords}, - } - -# Book to bibtex - - Code - toBibtex(bib) - Output - @Book{latex:companion, - author = {Frank Mittelbach and Michel Gossens and Johannes Braams and David Carlisle and Chris Rowley}, - editor = {{{Barnes} and {Noble}}}, - title = {The LaTeX Companion}, - publisher = {Addison-Wesley Professional}, - year = {2004}, - volume = {3}, - number = {7}, - series = {The LateX Books}, - address = {Santa Monica}, - edition = {Fourth}, - month = {August}, - note = {Example modified for testing purposes}, - keywords = {Two, keyword}, - } - ---- - - Code - toBibtex(bib) - Output - @Book{mittelbach_etall:2004, - title = {The LaTeX Companion}, - author = {Frank Mittelbach and Michel Gossens and Johannes Braams and David Carlisle and Chris Rowley}, - year = {2004}, - month = {aug}, - publisher = {Addison-Wesley Professional}, - address = {Santa Monica}, - editor = {{Barnes and Noble}}, - series = {The LateX Books}, - volume = {3}, - number = {7}, - note = {Example modified for testing purposes}, - edition = {Fourth}, - keywords = {Two,keyword}, - } - -# Booklet to bibtex - - Code - toBibtex(bib) - Output - @Booklet{Mustermann2016, - title = {Java Booklet}, - author = {Max Mustermann}, - howpublished = {Internet}, - address = {Stuttgart}, - month = {feb}, - year = {2016}, - note = {Example modified from Jabref}, - keywords = {java}, - } - ---- - - Code - toBibtex(bib) - Output - @Booklet{mustermann:2016, - title = {Java Booklet}, - author = {Max Mustermann}, - year = {2016}, - month = {feb}, - address = {Stuttgart}, - note = {Example modified from Jabref}, - howpublished = {Internet}, - } - -# InBook to bibtex with pages - - Code - toBibtex(bib) - Output - @InBook{, - year = {2003}, - month = {oct}, - pages = {175--196}, - title = {Architectural Mismatch Tolerance}, - chapter = {Tolerances and Other Notes}, - author = {R. {de Lemos} and C. Gacek and A. 
Romanovsky}, - url = {http://www.cs.kent.ac.uk/pubs/2003/1773}, - publication_type = {inbook}, - submission_id = {12884_1074884456}, - isbn = {3-540-40727-8}, - editor = {A. Lalanda}, - edition = {Fifth}, - publisher = {Springer}, - volume = {2677}, - number = {234}, - address = {Lozoya}, - series = {Lecture Notes in Computer Science}, - type = {Architecting Dependable Systems}, - } - ---- - - Code - toBibtex(bib) - Output - @InBook{delemos_etall:2003, - title = {Architectural Mismatch Tolerance}, - author = {R. {de Lemos} and C. Gacek and A. Romanovsky}, - year = {2003}, - month = {oct}, - publisher = {Springer}, - address = {Lozoya}, - editor = {A. Lalanda}, - series = {Lecture Notes in Computer Science}, - volume = {2677}, - number = {234}, - pages = {175--196}, - isbn = {3-540-40727-8}, - url = {http://www.cs.kent.ac.uk/pubs/2003/1773}, - chapter = {Tolerances and Other Notes}, - edition = {Fifth}, - } - -# InCollection to bibtex - - Code - toBibtex(bib) - Output - @InCollection{, - author = {Klaus Abels}, - title = {Who Gives a Damn about Minimizers in Questions?}, - booktitle = {Proceedings from Semantics and Linguistic Theory {XIII}}, - publisher = {Cornell University}, - year = {2003}, - editor = {Robert B. Young and Yuping Zhou}, - pages = {1--18}, - address = {Ithaca, New York}, - topic = {interrogatives;nl-semantics;polarity;}, - } - ---- - - Code - toBibtex(bib) - Output - @InCollection{abels:2003, - title = {Who Gives a Damn about Minimizers in Questions?}, - author = {Klaus Abels}, - year = {2003}, - booktitle = {Proceedings from Semantics and Linguistic Theory XIII}, - publisher = {Cornell University}, - address = {Ithaca, New York}, - editor = {Robert B. Young and Yuping Zhou}, - pages = {1--18}, - } - -# InProceedings to bibtex - - Code - toBibtex(bib) - Output - @InProceedings{, - author = {John Aberdeen and Samuel Bayer and Sasha Caskey and Laurie Damianos and Alan Goldschen and Lynette Hirschman and Dan Loehr and Hugo Trapper}, - title = {Implementing Practical Dialogue Systems with the - {DARPA} Communicator Architecture}, - booktitle = {Proceedings of the {IJCAI}-99 Workshop on - Knowledge and Reasoning in Practical Dialogue Systems}, - year = {1999}, - editor = {Jan Alexandersson}, - pages = {81--86}, - series = {A Series}, - organization = {IJCAI}, - publisher = {International Joint Conference on Artificial Intelligence}, - address = {Murray Hill, New Jersey}, - } - ---- - - Code - toBibtex(bib) - Output - @InProceedings{aberdeen_etall:1999, - title = {Implementing Practical Dialogue Systems with the DARPA Communicator Architecture}, - author = {John Aberdeen and Samuel Bayer and Sasha Caskey and Laurie Damianos and Alan Goldschen and Lynette Hirschman and Dan Loehr and Hugo Trapper}, - year = {1999}, - booktitle = {Proceedings of the IJCAI-99 Workshop on Knowledge and Reasoning in Practical Dialogue Systems}, - publisher = {International Joint Conference on Artificial Intelligence}, - address = {Murray Hill, New Jersey}, - editor = {Jan Alexandersson}, - pages = {81--86}, - organization = {IJCAI}, - } - ---- - - Code - toBibtex(bib) - Output - @InProceedings{aberdeen_etall:1999, - title = {Implementing Practical Dialogue Systems with the DARPA Communicator Architecture}, - author = {John Aberdeen and Samuel Bayer and Sasha Caskey and Laurie Damianos and Alan Goldschen and Lynette Hirschman and Dan Loehr and Hugo Trapper}, - year = {1999}, - booktitle = {I Am a conference}, - publisher = {International Joint Conference on Artificial Intelligence}, - address = 
{Murray Hill, New Jersey}, - editor = {Jan Alexandersson}, - pages = {81--86}, - organization = {IJCAI}, - } - -# Manual to bibtex - - Code - toBibtex(bib) - Output - @Manual{, - author = {Gerhard Allwein and Dave Barker-Plummer and Jon Barwise and John Etchemendy}, - title = {{LPL} Software Manual}, - publisher = {{CSLI} Publications}, - year = {1999}, - address = {Stanford, California}, - howpublished = {CD-Rom}, - } - ---- - - Code - toBibtex(bib) - Output - @Manual{allwein_etall:1999, - title = {LPL Software Manual}, - author = {Gerhard Allwein and Dave Barker-Plummer and Jon Barwise and John Etchemendy}, - year = {1999}, - publisher = {CSLI Publications}, - address = {Stanford, California}, - howpublished = {CD-Rom}, - } - -# MastersThesis to bibtex - - Code - toBibtex(bib) - Output - @MastersThesis{, - author = {Murat Bayraktar}, - title = {Computer-Aided Analysis of {E}nglish Punctuation on a - Parsed Corpus: The Special Case of Comma}, - school = {Department of Computer Engineering and Information - Science, Bilkent University, Turkey}, - address = {Ankara, Turkey}, - year = {1996}, - note = {Forthcoming}, - } - ---- - - Code - toBibtex(bib) - Output - @MastersThesis{bayraktar:1996, - title = {Computer-Aided Analysis of English Punctuation on a Parsed Corpus: The Special Case of Comma}, - author = {Murat Bayraktar}, - year = {1996}, - address = {Ankara, Turkey}, - note = {Forthcoming}, - school = {Department of Computer Engineering and Information Science, Bilkent University, Turkey}, - } - -# PhdThesis to bibtex - - Code - toBibtex(bib) - Output - @PhdThesis{, - author = {David I. Beaver}, - title = {Presupposition and Assertion in Dynamic Semantics}, - school = {Centre for Cognitive Science, University of Edinburgh}, - year = {1995}, - type = {Ph.D. Dissertation}, - address = {Edinburgh}, - } - ---- - - Code - toBibtex(bib) - Output - @PhdThesis{beaver:1995, - title = {Presupposition and Assertion in Dynamic Semantics}, - author = {David I. Beaver}, - year = {1995}, - address = {Edinburgh}, - school = {Centre for Cognitive Science, University of Edinburgh}, - } - -# Proceedings to bibtex - - Code - toBibtex(bib) - Output - @Proceedings{, - title = {An Abductive Framework for Negation in Disjunctive - Logic Programming}, - organization = {{JELIA}'96}, - year = {1996}, - editor = {Jose Julio Alferes and Luis Moniz Pereira and Eva Orlowska}, - publisher = {Springer-Verlag}, - address = {Berlin}, - missinginfo = {pages}, - } - ---- - - Code - toBibtex(bib) - Output - @Proceedings{alferes_etall:1996, - title = {An Abductive Framework for Negation in Disjunctive Logic Programming}, - year = {1996}, - publisher = {Springer-Verlag}, - address = {Berlin}, - editor = {Jose Julio Alferes and Luis Moniz Pereira and Eva Orlowska}, - organization = {JELIA'96}, - } - -# TechReport to bibtex - - Code - toBibtex(bib) - Output - @TechReport{, - author = {John M. Aronis}, - title = {Implementing Inheritance on the Connection Machine}, - institution = {Intelligent Systems Program, University of Pittsburgh}, - number = {ISP 93-1}, - year = {1993}, - address = {Pittsburgh, PA 15260}, - } - ---- - - Code - toBibtex(bib) - Output - @TechReport{aronis:1993, - title = {Implementing Inheritance on the Connection Machine}, - author = {John M. 
Aronis}, - year = {1993}, - address = {Pittsburgh, PA 15260}, - number = {ISP 93-1}, - institution = {Intelligent Systems Program, University of Pittsburgh}, - } - ---- - - Code - toBibtex(bib) - Output - @TechReport{aronis:1993, - title = {Implementing Inheritance on the Connection Machine}, - author = {John M. Aronis}, - year = {1993}, - number = {ISP 93-1}, - institution = {rOpenSci}, - } - -# Unpublished to bibtex - - Code - toBibtex(bib) - Output - @Unpublished{, - author = {John M. Aronis and Foster J. Provost}, - title = {Efficiently Constructing Relational Features from Background}, - year = {1959}, - note = {Unpublished MS, Computer Science Department, University of Pittsburgh.}, - missinginfo = {Date is guess.}, - } - ---- - - Code - toBibtex(bib) - Output - @Unpublished{aronis_etall:1959, - title = {Efficiently Constructing Relational Features from Background}, - author = {John M. Aronis and Foster J. Provost}, - year = {1959}, - note = {Unpublished MS, Computer Science Department, University of Pittsburgh.}, - } - ---- - - Code - toBibtex(bib) - Output - @Unpublished{aronis_etall:1959, - title = {Efficiently Constructing Relational Features from Background}, - author = {John M. Aronis and Foster J. Provost}, - year = {1959}, - note = {Extracted with cffr R package}, - } - -# Test BibLateX entry - - Code - toBibtex(bib) - Output - @Article{, - author = {M. A. Kastenholz and Philippe H. Hünenbergerb}, - title = {Computation of methodology hyphen independent ionic solvation - free energies from molecular simulations}, - journal = {J. Chem. Phys.}, - year = {2006}, - note = {Example modified for testing purposes}, - pages = {55--65}, - date = {2006-03-15}, - file = {a_file.pdf}, - issuetitle = {Semantic {3D} Media and Content}, - translator = {Wicksteed, P. H. and {The translator factory}}, - urldate = {2006-10-01}, - pagetotal = {528}, - abstract = {The computation of ionic solvation free energies from - atomistic simulations is a surprisingly difficult problem that - has found no satisfactory solution for more than 15 years.}, - doi = {10.1063/1.2172593}, - isbn = {0-816-52066-6}, - issn = {0097-8493}, - url = {http://www.ctan.org}, - } - ---- - - Code - toBibtex(parsed) - Output - @Article{kastenholz_etall:2006, - title = {Computation of methodology hyphen independent ionic solvation free energies from molecular simulations}, - author = {M. A. Kastenholz and Philippe H. Hünenbergerb}, - year = {2006}, - month = {mar}, - journal = {J. Chem. Phys.}, - pages = {55--65}, - doi = {10.1063/1.2172593}, - isbn = {0-816-52066-6}, - issn = {0097-8493}, - url = {http://www.ctan.org}, - note = {Example modified for testing purposes}, - abstract = {The computation of ionic solvation free energies from atomistic simulations is a surprisingly difficult problem that has found no satisfactory solution for more than 15 years.}, - date = {2006-03-15}, - file = {a_file.pdf}, - issuetitle = {Semantic 3D Media and Content}, - pagetotal = {528}, - translator = {Wicksteed, P. H. and {The translator factory}}, - urldate = {2006-10-01}, - } - -# particle names - - Code - bibparsed - Output - - type: book - title: A Handbook for Scholars - authors: - - family-names: Leunen - given-names: Mary-Claire - name-particle: van - - family-names: Davis - given-names: Sammy - name-suffix: Jr. 
- year: '1979' - publisher: - name: Knopf - ---- - - Code - toBibtex(bib) - Output - @Book{vanleunen_etall:1979, - title = {A Handbook for Scholars}, - author = {Mary-Claire {van Leunen} and Sammy {Davis Jr.}}, - year = {1979}, - publisher = {Knopf}, - } - -# From plain cff with a citation - - Code - toBibtex(bib) - Output - @Misc{doe:1999, - title = {title}, - author = {John Doe}, - year = {1999}, - month = {mar}, - editor = {A name}, - } + s <- as_bibentry(a = 1) + Message + x Can't convert to `bibentry()`: + i argument "bibtype" is missing, with no default + ! Returning empty -# From plain cff +# as_bibentry NULL Code - toBibtex(bib) + toBibtex(a_bib) Output - @Misc{doe, - title = {My Research Software}, - author = {John Doe}, + @Misc{basic, + title = {basicdesc: A Basic Description}, + author = {Marc Basic}, + url = {https://basic.github.io/package}, + abstract = {A very basic description. Should parse without problems.}, + version = {0.1.6}, } ---- +# as_bibentry character Code - as_bibentry(cff(), what = "anda") + as_bibentry("invented_package") Condition Error in `as_bibentry()`: - ! `what` should be "preferred", "references", and "all", not "anda". + ! Don't know how to extract a from "invented_package". If it is a package run `install.packages("invented_package")` first. -# From file +--- Code - toBibtex(bib) + as_bibentry(f) Output - @InBook{vanderrealpersoniv_etall:2017, - title = {Book Title}, - author = {One Truly {van der Real Person IV} and {Entity Project Team Conference entity}}, - year = {2017}, - month = {mar}, - journal = {PeerJ}, - publisher = {Entity Project Team Conference entity}, - address = {22 Acacia Avenue, Citationburgh, Renfrewshire, GB}, - editor = {One Truly {van der Real Person IV} and {Entity Project Team Conference entity}}, - series = {Collection Title}, - volume = {2}, - number = {123}, - pages = {123--456}, - doi = {10.5281/zenodo.1003150}, - isbn = {978-1-89183-044-0}, - issn = {1234-543X}, - url = {http://j.mp}, - note = {A field for general notes about the reference, usable in other formats such as BibTeX.}, - chapter = {Chapter 2 - "Reference keys"}, - edition = {2nd edition}, - howpublished = {Hardcover book}, - abstract = {Description of the book.}, - date = {2017-10-31}, - file = {book.zip}, - issuetitle = {Special Issue on Software Citation}, - keywords = {Software,Citation}, - pagetotal = {765}, - translator = {van der Real Person IV, One Truly and {Entity Project Team Conference entity}}, - urldate = {2017-10-31}, - version = {0.0.1423-BETA}, - institution = {Entity Project Team Conference entity}, - } + Basic M (????). "basicdesc: A Basic Description." + . --- Code - as_bibentry("anunkonwpackage") + as_bibentry("invented_package") Condition Error in `as_bibentry()`: - ! Don't know how to extract a from "anunkonwpackage". If it is a package run `install.packages("anunkonwpackage")` first. - -# Test anonymous - - Code - toBibtex(back) - Output - @Booklet{abooklet, - title = {A booklet}, - } - ---- - - Code - toBibtex(back) - Output - @Manual{amanual, - title = {A manual}, - } - ---- - - Code - toBibtex(back) - Output - @Misc{amisc, - title = {A misc}, - } - ---- - - Code - toBibtex(back) - Output - @Proceedings{proceedings:1984, - title = {proceedings}, - year = {1984}, - } - -# Fallback month - - Code - toBibtex(bib) - Output - @Article{, - title = {An Article}, - author = {John Doe}, - journal = {El Adelantado de Segovia}, - year = {1678}, - date = {1678-04-23}, - } + ! Don't know how to extract a from "invented_package". 
If it is a package run `install.packages("invented_package")` first. --- Code - toBibtex(bibback) + as_bibentry(f) Output - @Article{doe:1678, - title = {An Article}, - author = {John Doe}, - year = {1678}, - month = {apr}, - journal = {El Adelantado de Segovia}, - date = {1678-04-23}, - } + Basic M (????). "basicdesc: A Basic Description." + . -# Test Fallback year +# as_bibentry cff Code - toBibtex(msg) - Output - @Misc{doe, - title = {My Research Software}, - author = {John Doe}, - } + end <- as_bibentry(a_cff, what = "references") + Message + ! In `x` didn't find anything with `what` = "references". Returning empty . --- Code - toBibtex(parsed) + toBibtex(as_bibentry(mod_ref)) Output - @Misc{doe:2020, - title = {My Research Software}, - author = {John Doe}, - year = {2020}, + @Manual{bootstrapmetho:1997, + title = {Bootstrap Methods and Their Applications}, + year = {1997}, + publisher = {Cambridge University Press (Madrid)}, + url = {http://statwww.epfl.ch/davison/BMA/}, + translator = {{Research Translators Ltd.}}, } -# Corrupt entry - - Code - n <- as_bibentry(x) - Message - x Can't convert to `bibentry()`: - i A bibentry of bibtype 'Article' has to specify the fields: journal, year - ! Returning empty - -# default - - Code - s <- as_bibentry(a = 1) - Message - x Can't convert to `bibentry()`: - i argument "bibtype" is missing, with no default - ! Returning empty - diff --git a/tests/testthat/_snaps/as_cff_person.md b/tests/testthat/_snaps/as_cff_person.md index 86569528..65cafcb3 100644 --- a/tests/testthat/_snaps/as_cff_person.md +++ b/tests/testthat/_snaps/as_cff_person.md @@ -12,7 +12,7 @@ Message i In `as_cff_person()` using internal for "txt". -# Parse one person +# Coerce one person Code as_cff_person(p) @@ -20,7 +20,7 @@ - family-names: person given-names: one -# Parse several persons +# Coerce several persons Code as_cff_person(p) @@ -32,7 +32,7 @@ - family-names: more given-names: and one -# Parse bibtex persons +# Coerce bibtex persons Code as_cff_person(s) @@ -52,7 +52,7 @@ - family-names: one given-names: Another -# Parse bibtex persons with masks +# Coerce bibtex persons with masks Code as_cff_person(s) diff --git a/tests/testthat/_snaps/as_cff_reference.md b/tests/testthat/_snaps/as_cff_reference.md index 89902239..ea1ce50b 100644 --- a/tests/testthat/_snaps/as_cff_reference.md +++ b/tests/testthat/_snaps/as_cff_reference.md @@ -1,315 +1,7 @@ -# Test full with CITATION and (option = author) - - Code - cffobj - Output - cff-version: 1.2.0 - message: 'To cite package "rgeos" in publications use:' - type: software - license: GPL-2.0-or-later - title: 'rgeos: Interface to Geometry Engine - Open Source (''GEOS'')' - version: 0.5-7 - abstract: 'Interface to Geometry Engine - Open Source (''GEOS'') using the C ''API'' - for topology operations on geometries. Please note that ''rgeos'' will be retired - by the end of 2023, plan transition to sf functions using ''GEOS'' at your earliest - convenience. The ''GEOS'' library is external to the package, and, when installing - the package from source, must be correctly installed first. Windows and Mac Intel - OS X binaries are provided on ''CRAN''. (''rgeos'' >= 0.5-1): Up to and including - ''GEOS'' 3.7.1, topological operations succeeded with some invalid geometries for - which the same operations fail from and including ''GEOS'' 3.7.2. 
The ''checkValidity='' - argument defaults and structure have been changed, from default FALSE to integer - default ''0L'' for ''GEOS'' < 3.7.2 (no check), ''1L'' ''GEOS'' >= 3.7.2 (check - and warn). A value of ''2L'' is also provided that may be used, assigned globally - using ''set_RGEOS_CheckValidity(2L)'', or locally using the ''checkValidity=2L'' - argument, to attempt zero-width buffer repair if invalid geometries are found. The - previous default (FALSE, now ''0L'') is fastest and used for ''GEOS'' < 3.7.2, but - will not warn users of possible problems before the failure of topological operations - that previously succeeded. From ''GEOS'' 3.8.0, repair of geometries may also be - attempted using ''gMakeValid()'', which may, however, return a collection of geometries - of different types.' - authors: - - family-names: Bivand - given-names: Roger - email: Roger.Bivand@nhh.no - orcid: https://orcid.org/0000-0003-2392-6140 - - family-names: Rundel - given-names: Colin - repository: https://CRAN.R-project.org/package=rgeos - repository-code: https://r-forge.r-project.org/projects/rgeos/ - url: https://trac.osgeo.org/geos/ - date-released: '2020-09-07' - contact: - - family-names: Bivand - given-names: Roger - email: Roger.Bivand@nhh.no - orcid: https://orcid.org/0000-0003-2392-6140 - references: - - type: manual - title: 'rgeos: Interface to Geometry Engine - Open Source (''GEOS'')' - authors: - - family-names: Bivand - given-names: Roger - email: Roger.Bivand@nhh.no - orcid: https://orcid.org/0000-0003-2392-6140 - - family-names: Rundel - given-names: Colin - year: '2020' - notes: R package version 0.5-7 - url: https://CRAN.R-project.org/package=rgeos - - type: article - title: 'RNeXML: A Package for Reading and Writing Richly Annotated Phylogenetic, - Character, and Trait Data in R' - authors: - - family-names: Boettiger - given-names: Carl - - family-names: Chamberlain - given-names: Scott - - family-names: Vos - given-names: Rutger - - family-names: Lapp - given-names: Hilmar - journal: Methods in Ecology and Evolution - year: '2016' - volume: '7' - doi: 10.1111/2041-210X.12469 - start: '352' - end: '357' - - type: book - title: 'ggplot2: Elegant Graphics for Data Analysis' - authors: - - family-names: Wickham - given-names: Hadley - publisher: - name: Springer-Verlag New York - year: '2016' - isbn: 978-3-319-24277-4 - url: https://ggplot2.tidyverse.org - identifiers: - - type: url - value: http://rgeos.r-forge.r-project.org/index.html - -# Parsed several citations - - Code - citobj - Output - - type: manual - title: 'rgeos: Interface to Geometry Engine - Open Source (''GEOS'')' - authors: - - family-names: Bivand - given-names: Roger - email: Roger.Bivand@nhh.no - orcid: https://orcid.org/0000-0003-2392-6140 - - family-names: Rundel - given-names: Colin - year: '2020' - notes: R package version 0.5-7 - url: https://CRAN.R-project.org/package=rgeos - - type: article - title: 'RNeXML: A Package for Reading and Writing Richly Annotated Phylogenetic, - Character, and Trait Data in R' - authors: - - family-names: Boettiger - given-names: Carl - - family-names: Chamberlain - given-names: Scott - - family-names: Vos - given-names: Rutger - - family-names: Lapp - given-names: Hilmar - journal: Methods in Ecology and Evolution - year: '2016' - volume: '7' - doi: 10.1111/2041-210X.12469 - start: '352' - end: '357' - - type: book - title: 'ggplot2: Elegant Graphics for Data Analysis' - authors: - - family-names: Wickham - given-names: Hadley - publisher: - name: Springer-Verlag New York - 
year: '2016' - isbn: 978-3-319-24277-4 - url: https://ggplot2.tidyverse.org - -# Add wrong field to citation - - Code - cffobj - Output - cff-version: 1.2.0 - message: If you use this software, please cite it using these metadata. - title: My Research Software - authors: - - family-names: Doe - given-names: John - references: - - type: manual - title: favoritefood is not valid on cff schema - authors: - - family-names: Smith - given-names: Jane - -# Fix wrong orcid - - Code - cffobj - Output - cff-version: 1.2.0 - message: If you use this software, please cite it using these metadata. - title: My Research Software - authors: - - family-names: Doe - given-names: John - references: - - type: manual - title: Wrong orcid fixed by cffr - authors: - - family-names: Smith - given-names: Jane - orcid: https://orcid.org/0000-0000-0000-306X - -# Several identifiers and duplicates - - Code - cffobj - Output - cff-version: 1.2.0 - message: If you use this software, please cite it using these metadata. - title: My Research Software - authors: - - family-names: Doe - given-names: John - references: - - type: manual - title: A Language and Environment for Statistical Computing - authors: - - name: R Core Team - year: '2022' - url: https://www.R-project.org/ - doi: 10.5281/zenodo.5366600 - identifiers: - - type: doi - value: 10.5281/zenodo.5366601 - - type: doi - value: 10.5281/zenodo.5366602 - - type: url - value: https://google.com/ - -# Test keywords and urls - - Code - cffobj - Output - cff-version: 1.2.0 - message: If you use this software, please cite it using these metadata. - title: My Research Software - authors: - - family-names: Doe - given-names: John - references: - - type: manual - title: A Language and Environment for Statistical Computing - authors: - - name: R Core Team - year: '2022' - url: https://www.R-project.org/ - keywords: - - Some - - random keywords - - in - - here - identifiers: - - type: url - value: https://google.com/ - -# Parse persons on CITATION - - Code - bibparsed - Output - - type: manual - title: A Language and Environment for Statistical Computing - authors: - - name: R Core Team - year: '2021' - contact: - - family-names: name - given-names: A - - family-names: contact - given-names: A - conference: - name: A conference - address: A location - database-provider: - name: Database provider - editors: - - family-names: editor - given-names: A - - name: Ben and Jerry - editors-series: - - family-names: editor series - given-names: An - - name: Another - publisher: - name: A publisher - recipients: - - family-names: recipient - given-names: A - senders: - - name: A Sender - - family-names: Sender - given-names: Another - translators: - - family-names: one - given-names: Translator - - family-names: two - given-names: Translator - -# Test inputs - - Code - bibparsed - Output - - type: book - title: Test - authors: - - family-names: Jean - given-names: Billy - year: '2021' - publisher: - name: Random House - -# Fallback date - - Code - init_cff - Output - - type: generic - title: Test - authors: - - family-names: Jean - given-names: Billy - date-published: '2050-01-12' - date-accessed: '2099-02-02' - publisher: - name: Random House - month: '1' - year: '2050' - -# Duplicates - - Code - uniq <- as_cff(bibend) - Message - i Removing duplicate objects. 
- # Article Code - bibparsed + bib_cff Output - type: article title: Literate Programming @@ -327,7 +19,7 @@ # Book Code - bibparsed + bib_cff Output - type: book title: The LaTeX Companion @@ -362,7 +54,7 @@ # Booklet Code - bibparsed + bib_cff Output - type: pamphlet title: Java Booklet @@ -379,7 +71,7 @@ # Conference Code - bibparsed + bib_cff Output - type: conference-paper title: On Notions of Information Transfer in VLSI Circuits @@ -414,7 +106,7 @@ # InBook Code - bibparsed + bib_cff Output - type: book title: A Framework for Freeness Analysis @@ -444,7 +136,7 @@ # InCollection Code - bibparsed + bib_cff Output - type: generic title: Knowledge-Based Methods for WSD @@ -474,7 +166,7 @@ # InProceedings Code - bibparsed + bib_cff Output - type: conference-paper title: On Notions of Information Transfer in VLSI Circuits @@ -506,10 +198,30 @@ name: Proc. Fifteenth Annual ACM STOC address: Boston +--- + + Code + toBibtex(bib) + Output + @InProceedings{oaho_etall:1983, + title = {On Notions of Information Transfer in VLSI Circuits}, + author = {Alfred V. Oaho and Jeffrey D. Ullman and Mihalis Yannakakis}, + year = {1983}, + booktitle = {I Am a conference}, + publisher = {Academic Press}, + address = {Boston}, + editor = {Wizard V. Oz and Mihalis Yannakakis}, + volume = {41}, + number = {17}, + pages = {133--139}, + note = {Example modified for testing purposes}, + organization = {ACM}, + } + # Manual Code - bibparsed + bib_cff Output - type: manual title: A Language and Environment for Statistical Computing @@ -526,7 +238,7 @@ # MastersThesis Code - bibparsed + bib_cff Output - type: thesis title: An examination of keystroke dynamics for continuous user authentication @@ -544,7 +256,7 @@ # Misc Code - bibparsed + bib_cff Output - type: generic title: A Language and Environment for Statistical Computing @@ -558,7 +270,7 @@ # PhdThesis Code - bibparsed + bib_cff Output - type: thesis title: An examination of keystroke dynamics for continuous user authentication @@ -576,7 +288,7 @@ # Proceedings Code - bibparsed + bib_cff Output - type: proceedings title: Proc. Fifteenth Annual STOC @@ -605,7 +317,7 @@ # TechReport Code - bibparsed + bib_cff Output - type: report title: Naive tools for studying compilation histories @@ -625,7 +337,7 @@ # Unpublished Code - bibparsed + bib_cff Output - type: unpublished title: Demonstratives @@ -636,10 +348,23 @@ year: '1977' month: '8' +--- + + Code + toBibtex(bib) + Output + @Unpublished{kaplan:1977, + title = {Demonstratives}, + author = {D. 
Kaplan}, + year = {1977}, + month = {aug}, + note = {Extracted with cffr R package}, + } + # InBook with booktitle Code - bibparsed + bib_cff Output - type: generic title: Bibliographies and citations @@ -663,7 +388,7 @@ # Test entry without author Code - bibparsed + bib_cff Output - type: proceedings title: Proceedings of the 6th European Conference on Computer Systems @@ -687,7 +412,7 @@ # Test entry without author but has a key Code - bibparsed + bib_cff Output - type: generic title: Proceedings of the 6th European Conference on Computer Systems @@ -704,7 +429,7 @@ # Test entry without author and key Code - bibparsed + bib_cff Output - type: generic title: Proceedings of the 6th European Conference on Computer Systems @@ -755,7 +480,7 @@ # Check extended BibLatex Fields Code - bibparsed + bib_cff Output - type: article title: Computation of methodology hyphen independent ionic solvation free energies @@ -789,3 +514,52 @@ end: '65' month: '3' +# Duplicate entries + + Code + bib_cff <- as_cff(rep(bib, 2)) + Message + i Removing duplicate objects. + +# Identifiers and dois + + Code + as_cff(checf) + Output + url: https://www.R-project.org/ + doi: 10.5281/zenodo.5366600 + identifiers: + - type: doi + value: 10.5281/zenodo.5366601 + - type: doi + value: 10.5281/zenodo.5366602 + - type: url + value: https://google.com/ + +--- + + Code + cffobj + Output + cff-version: 1.2.0 + message: If you use this software, please cite it using these metadata. + title: My Research Software + authors: + - family-names: Doe + given-names: John + references: + - type: manual + title: A Language and Environment for Statistical Computing + authors: + - name: R Core Team + year: '2022' + url: https://www.R-project.org/ + doi: 10.5281/zenodo.5366600 + identifiers: + - type: doi + value: 10.5281/zenodo.5366601 + - type: doi + value: 10.5281/zenodo.5366602 + - type: url + value: https://google.com/ + diff --git a/tests/testthat/_snaps/cff_create.md b/tests/testthat/_snaps/cff_create.md index 968ab77e..0264c035 100644 --- a/tests/testthat/_snaps/cff_create.md +++ b/tests/testthat/_snaps/cff_create.md @@ -1,3 +1,26 @@ +# Test indev + + Code + a_cff + Output + cff-version: 1.2.0 + message: 'To cite package "basicdesc" in publications use:' + type: software + license: GPL-3.0-only + title: 'basicdesc: A Basic Description' + version: 0.1.6 + abstract: A very basic description. Should parse without problems. 
+ authors: + - family-names: Basic + given-names: Marc + email: marcbasic@gmail.com + repository-code: https://github.com/basic/package + url: https://basic.github.io/package + contact: + - family-names: Basic + given-names: Marc + email: marcbasic@gmail.com + # No auto generate preferred citations Code @@ -92,10 +115,10 @@ --- -# Parse date +# Coerce date Code - parsed + a_cff Output cff-version: 1.2.0 message: 'To cite package "rgeos" in publications use:' @@ -141,10 +164,10 @@ - type: url value: http://rgeos.r-forge.r-project.org/index.html -# Parse date in another format +# Coerce date in another format Code - parsed + a_cff Output cff-version: 1.2.0 message: 'To cite package "basicdescdate" in publications use:' @@ -168,7 +191,7 @@ # Parsing many urls Code - parsed + a_cff Output cff-version: 1.2.0 message: 'To cite package "manyurls" in publications use:' @@ -200,7 +223,7 @@ # Parsing Gitlab Code - parsed + a_cff Output cff-version: 1.2.0 message: 'To cite package "codemetar" in publications use:' @@ -239,7 +262,7 @@ # Parsing many persons Code - parsed + a_cff Output cff-version: 1.2.0 message: 'To cite package "manypersons" in publications use:' @@ -284,7 +307,7 @@ # Parsing wrong urls Code - parsed + a_cff Output cff-version: 1.2.0 message: 'To cite package "wrongurls" in publications use:' @@ -322,7 +345,7 @@ # Parsing two maintainers Code - parsed + a_cff Output cff-version: 1.2.0 message: 'To cite package "codemetar" in publications use:' @@ -357,7 +380,7 @@ # Parsing r-universe Code - parsed + a_cff Output cff-version: 1.2.0 message: 'To cite package "codemetar" in publications use:' @@ -397,7 +420,7 @@ # Parsing Bioconductor Code - parsed + a_cff Output cff-version: 1.2.0 message: 'To cite package "GenomicRanges" in publications use:' @@ -433,7 +456,7 @@ # Parsing Posit Package Manager Code - parsed + a_cff Output cff-version: 1.2.0 message: 'To cite package "resmush" in publications use:' @@ -466,7 +489,7 @@ # Search package on CRAN Code - parsed + a_cff Output cff-version: 1.2.0 message: 'To cite package "ggplot2" in publications use:' diff --git a/tests/testthat/_snaps/cff_gha_update.md b/tests/testthat/_snaps/cff_gha_update.md new file mode 100644 index 00000000..6b4eae5d --- /dev/null +++ b/tests/testthat/_snaps/cff_gha_update.md @@ -0,0 +1,23 @@ +# Test in mock package + + Code + cff_gha_update() + Message + i Creating directory './.github/workflows'. + v Installing './.github/workflows/update-citation-cff.yaml' + i Adding ".github" to '.Rbuildignore' + +--- + + Code + cff_gha_update() + Message + ! File './.github/workflows/update-citation-cff.yaml' already installed. Use `overwrite = TRUE` for overwrite + +--- + + Code + cff_gha_update(overwrite = TRUE) + Message + v Installing './.github/workflows/update-citation-cff.yaml' + diff --git a/tests/testthat/_snaps/cff_read.md b/tests/testthat/_snaps/cff_read.md index b6e4e4be..8e0e2073 100644 --- a/tests/testthat/_snaps/cff_read.md +++ b/tests/testthat/_snaps/cff_read.md @@ -11,32 +11,32 @@ Code cff_read("abcde") Condition - Error in `cff_read()`: - ! 'abcde' does not exist. Check the '.' directory + Error in `file_exist_abort()`: + ! 'abcde' doesn't exist. Check the '.' directory # cff_read citation.cff Code cff_read_cff_citation("a") Condition - Error in `cff_read_cff_citation()`: - ! 'a' does not exist. Check the '.' directory + Error in `file_exist_abort()`: + ! 'a' doesn't exist. Check the '.' directory # cff_read DESCRIPTION Code cff_read_description("a") Condition - Error in `cff_read_description()`: - ! 
'a' does not exist. Check the '.' directory + Error in `file_exist_abort()`: + ! 'a' doesn't exist. Check the '.' directory # cff_read bib Code cff_read_bib("a") Condition - Error in `cff_read_bib()`: - ! 'a' does not exist. Check the '.' directory + Error in `file_exist_abort()`: + ! 'a' doesn't exist. Check the '.' directory --- @@ -65,8 +65,8 @@ Code cff_read_citation("a") Condition - Error in `cff_read_citation()`: - ! 'a' does not exist. Check the '.' directory + Error in `file_exist_abort()`: + ! 'a' doesn't exist. Check the '.' directory --- diff --git a/tests/testthat/_snaps/methods.md b/tests/testthat/_snaps/methods.md index 8aa656f5..ed342e13 100644 --- a/tests/testthat/_snaps/methods.md +++ b/tests/testthat/_snaps/methods.md @@ -922,6 +922,20 @@ [1] "One Truly van der Real Person IV (Citey, Excellent University, Niceplace, Arcadia, 22 Acacia Avenue, Citationburgh, Renfrewshire, C13 7X7, GB, , +44(0)141-323 4567, +44(0)141-323 45678, https://www.entity-project-team.io)" [2] "Entity Project Team Conference entity (22 Acacia Avenue, Citationburgh, Renfrewshire, C13 7X7, GB, , +44(0)141-323 4567, +44(0)141-323 45678, https://www.entity-project-team.io, 2017-01-01, 2017-01-31, The team garage)" +--- + + Code + end <- as.person(malf) + Message + i Can't create for some elements of `x`. + +--- + + Code + aa2 <- as.person(aa) + Message + i Removing duplicate objects. + # Errors on other as.person methods Code diff --git a/tests/testthat/_snaps/utils-alerts.md b/tests/testthat/_snaps/utils-alerts.md new file mode 100644 index 00000000..47452c3e --- /dev/null +++ b/tests/testthat/_snaps/utils-alerts.md @@ -0,0 +1,56 @@ +# Check abort_if_not_cff + + Code + abort_if_not_cff(err) + Condition + Error in `abort_if_not_cff()`: + ! `x` is not a '*.cff' file. + +--- + + Code + abort_if_not_cff(x) + Condition + Error in `abort_if_not_cff()`: + ! `x` is an object of class , not . + +# Check file_exist_abort + + Code + file_exist_abort(nonexits, abort = TRUE) + Condition + Error in `file_exist_abort()`: + ! 'aaaaaaaa' doesn't exist. Check the '.' directory + +--- + + Code + file_exist_abort(non_with_path, abort = TRUE) + Condition + Error in `file_exist_abort()`: + ! './R/no/existe' doesn't exist. Check the './R/no' directory + +# Check match_cff_arg + + Code + match_cff_arg("a", "b", "...") + Condition + Error in `match_cff_arg()`: + ! `...` should be "b", not "a". + +--- + + Code + match_cff_arg("a", c("b", "c", "d"), "what") + Condition + Error in `match_cff_arg()`: + ! `what` should be "b", "c", or "d", not "a". + +--- + + Code + my_fun() + Condition + Error in `my_fun()`: + ! `aa` should be "A", "B", "C", "D", or "E", not "a". 
+ diff --git a/tests/testthat/test-as_bibentry.R b/tests/testthat/test-as_bibentry.R index 3cd11b5f..2ef70291 100644 --- a/tests/testthat/test-as_bibentry.R +++ b/tests/testthat/test-as_bibentry.R @@ -1,6 +1,6 @@ -# Test Bibtex ---- -test_that("Article to bibtex", { - bib <- bibentry("Article", +test_that("as_bibentry default", { + bib_coerced <- as_bibentry( + bibtype = "Article", key = "knuth:1984", author = person("R Core Team"), title = "Literate Programming", @@ -13,438 +13,120 @@ test_that("Article to bibtex", { month = "January", keywords = "Some, simple, keywords" ) - expect_snapshot(toBibtex(bib)) - x <- as_cff(bib) - bib <- as_bibentry(x) - expect_snapshot(toBibtex(bib)) -}) - -test_that("Book to bibtex", { - bib <- bibentry("Book", - key = "latex:companion", - author = "Frank Mittelbach and Michel Gossens - and Johannes Braams and David Carlisle - and Chris Rowley", - editor = "{Barnes and Noble}", - title = "The LaTeX Companion", - publisher = "Addison-Wesley Professional", - year = "2004", + direct <- bibentry( + bibtype = "Article", + key = "knuth:1984", + author = person("R Core Team"), + title = "Literate Programming", + journal = "The Computer Journal", + year = "1984", # Optional - volume = "3", - number = 7, - series = "The LateX Books", - address = "Santa Monica", - edition = "Fourth", - month = "August", - note = "Example modified for testing purposes", - keywords = c("Two, keyword") + volume = "27", + number = 2, + pages = "97--111", + month = "January", + keywords = "Some, simple, keywords" ) + expect_identical(bib_coerced, direct) - expect_snapshot(toBibtex(bib)) - bibparsed <- as_cff(bib) - bib <- as_bibentry(bibparsed) - expect_snapshot(toBibtex(bib)) -}) - - -test_that("Booklet to bibtex", { - bib <- bibentry("Booklet", - key = "Mustermann2016", - title = "Java Booklet", + with_number_first <- as_bibentry( + number = 2, + bibtype = "Article", + key = "knuth:1984", + author = person("R Core Team"), + title = "Literate Programming", + journal = "The Computer Journal", + year = "1984", # Optional - author = "Max Mustermann", - howpublished = "Internet", - address = "Stuttgart", - month = "feb", - year = "2016", - note = "Example modified from Jabref", - keywords = "java" - ) - - expect_snapshot(toBibtex(bib)) - bibparsed <- as_cff(bib) - bib <- as_bibentry(bibparsed) - expect_snapshot(toBibtex(bib)) -}) - -test_that("InBook to bibtex with pages", { - bib <- bibentry("InBook", - year = "2003", - month = "oct", - pages = "175--196", - title = "Architectural Mismatch Tolerance", - chapter = "Tolerances and Other Notes", - author = "R. de Lemos and C. Gacek and A. Romanovsky", - URL = "http://www.cs.kent.ac.uk/pubs/2003/1773", - publication_type = "inbook", - submission_id = "12884_1074884456", - ISBN = "3-540-40727-8", - editor = "A. Lalanda", - edition = "Fifth", - publisher = "Springer", - volume = "2677", - number = "234", - address = "Lozoya", - series = "Lecture Notes in Computer Science", - type = "Architecting Dependable Systems", - ) - - expect_snapshot(toBibtex(bib)) - bibparsed <- as_cff(bib) - bib <- as_bibentry(bibparsed) - expect_snapshot(toBibtex(bib)) -}) - -test_that("InCollection to bibtex", { - bib <- bibentry("InCollection", - author = "Klaus Abels", - title = "Who Gives a Damn about Minimizers in Questions?", - booktitle = "Proceedings from Semantics and Linguistic Theory {XIII}", - publisher = "Cornell University", - year = 2003, - editor = "Robert B. 
Young and Yuping Zhou", - pages = "1--18", - address = "Ithaca, New York", - topic = "interrogatives;nl-semantics;polarity;" - ) - - expect_snapshot(toBibtex(bib)) - bibparsed <- as_cff(bib) - bib <- as_bibentry(bibparsed) - expect_snapshot(toBibtex(bib)) -}) - - -test_that("InProceedings to bibtex", { - bib <- bibentry("InProceedings", - author = "John Aberdeen and Samuel Bayer and Sasha Caskey and Laurie - Damianos and Alan Goldschen and Lynette Hirschman and - Dan Loehr and Hugo Trapper", - title = "Implementing Practical Dialogue Systems with the - {DARPA} Communicator Architecture", - booktitle = "Proceedings of the {IJCAI}-99 Workshop on - Knowledge and Reasoning in Practical Dialogue Systems", - year = 1999, - editor = "Jan Alexandersson", - pages = "81--86", - series = "A Series", - organization = "IJCAI", - publisher = "International Joint Conference on Artificial Intelligence", - address = "Murray Hill, New Jersey", - ) - - expect_snapshot(toBibtex(bib)) - bibparsed <- as_cff(bib) - bib <- as_bibentry(bibparsed) - expect_snapshot(toBibtex(bib)) - - # If we remove collection title use conference - bibparsed[[1]]$`collection-title` <- NULL - bibparsed[[1]]$conference$name <- "I Am a conference" - bib <- as_bibentry(bibparsed) - expect_snapshot(toBibtex(bib)) -}) - - -test_that("Manual to bibtex", { - bib <- bibentry("Manual", - author = "Gerhard Allwein and Dave Barker-Plummer and Jon Barwise - and John Etchemendy", - title = "{LPL} Software Manual", - publisher = "{CSLI} Publications", - year = 1999, - address = "Stanford, California", - howpublished = "CD-Rom", - ) - - expect_snapshot(toBibtex(bib)) - bibparsed <- as_cff(bib) - bib <- as_bibentry(bibparsed) - expect_snapshot(toBibtex(bib)) -}) - - -test_that("MastersThesis to bibtex", { - bib <- bibentry("MastersThesis", - author = "Murat Bayraktar", - title = "Computer-Aided Analysis of {E}nglish Punctuation on a - Parsed Corpus: The Special Case of Comma", - school = "Department of Computer Engineering and Information - Science, Bilkent University, Turkey", - address = "Ankara, Turkey", - year = 1996, - note = "Forthcoming", - ) - - expect_snapshot(toBibtex(bib)) - bibparsed <- as_cff(bib) - bib <- as_bibentry(bibparsed) - expect_snapshot(toBibtex(bib)) -}) - - -test_that("PhdThesis to bibtex", { - bib <- bibentry("PhdThesis", - author = "David I. Beaver", - title = "Presupposition and Assertion in Dynamic Semantics", - school = "Centre for Cognitive Science, University of Edinburgh", - year = 1995, - type = "Ph.D. 
Dissertation", - address = "Edinburgh", - ) - - expect_snapshot(toBibtex(bib)) - bibparsed <- as_cff(bib) - bib <- as_bibentry(bibparsed) - expect_snapshot(toBibtex(bib)) -}) - - -test_that("Proceedings to bibtex", { - bib <- bibentry("Proceedings", - title = "An Abductive Framework for Negation in Disjunctive - Logic Programming", - organization = "{JELIA}'96", - year = 1996, - editor = "Jose Julio Alferes and Luis Moniz Pereira and Eva Orlowska", - publisher = "Springer-Verlag", - address = "Berlin", - missinginfo = "pages", - ) - - expect_snapshot(toBibtex(bib)) - bibparsed <- as_cff(bib) - bib <- as_bibentry(bibparsed) - expect_snapshot(toBibtex(bib)) -}) - -test_that("TechReport to bibtex", { - bib <- bibentry("TechReport", - author = person("John M.", "Aronis", - comment = c(affiliation = "rOpenSci") - ), - title = "Implementing Inheritance on the Connection Machine", - institution = "Intelligent Systems Program, University of Pittsburgh", - number = "ISP 93-1", - year = 1993, - address = "Pittsburgh, PA 15260", - ) - - expect_snapshot(toBibtex(bib)) - bibparsed <- as_cff(bib) - bib <- as_bibentry(bibparsed) - expect_snapshot(toBibtex(bib)) - - # Fallback when missing institution - bibparsed[[1]]$institution <- NULL - - bib <- as_bibentry(bibparsed) - expect_snapshot(toBibtex(bib)) -}) - -test_that("Unpublished to bibtex", { - bib <- bibentry("Unpublished", - author = "John M. Aronis and Foster J. Provost", - title = "Efficiently Constructing Relational Features from Background", - year = 1959, - note = paste0( - "Unpublished MS, Computer Science Department, ", - "University of Pittsburgh." - ), - missinginfo = "Date is guess.", - ) - - expect_snapshot(toBibtex(bib)) - bibparsed <- as_cff(bib) - bib <- as_bibentry(bibparsed) - expect_snapshot(toBibtex(bib)) - - # With custom note - bibparsed[[1]]$notes <- NULL - bib <- as_bibentry(bibparsed) - expect_snapshot(toBibtex(bib)) -}) - -test_that("Test BibLateX entry", { - bib <- bibentry("Article", - author = "M. A. Kastenholz, and Philippe H. Hünenbergerb", - title = "Computation of methodology hyphen independent ionic solvation - free energies from molecular simulations", - journal = "J. Chem. Phys.", - year = 2006, - note = "Example modified for testing purposes", - pages = "55--65", - - # Additional BibLatex Fields - date = "2006-03-15", - file = "a_file.pdf", - issuetitle = "Semantic {3D} Media and Content", - translator = "Wicksteed, P. H. 
and {The translator factory}", - urldate = "2006-10-01", - pagetotal = 528, - abstract = "The computation of ionic solvation free energies from - atomistic simulations is a surprisingly difficult problem that - has found no satisfactory solution for more than 15 years.", - doi = "10.1063/1.2172593", - isbn = "0-816-52066-6", - issn = "0097-8493", - url = "http://www.ctan.org" - ) - expect_snapshot(toBibtex(bib)) - x <- as_cff(bib) - - - parsed <- as_bibentry(x) - expect_snapshot(toBibtex(parsed)) -}) - - -# Other testers ---- - -test_that("particle names", { - bib <- bibentry("Book", - title = "A Handbook for Scholars", - author = person("Mary-Claire", "van Leunen"), - year = 1979, - publisher = "Knopf" - ) - - - bibparsed <- as_cff(bib) - bibparsed[[1]]$authors <- as_cff_person( - "van Leunen, Mary-Claire and Davis, Jr., Sammy" - ) - - cffobj <- cff_create(cff(), - keys = list(references = bibparsed) - ) - - expect_true(cff_validate(cffobj, verbose = FALSE)) - - expect_snapshot(bibparsed) - - bib <- as_bibentry(bibparsed) - expect_snapshot(toBibtex(bib)) -}) - -test_that("From plain cff with a citation", { - s <- cff() - s <- cff_create(s) - - acit <- bibentry( - bibtype = "misc", title = "title", year = 1999, - author = "John Doe", - month = 3 + volume = "27", + pages = "97--111", + month = "January", + keywords = "Some, simple, keywords" ) - s$`preferred-citation` <- as_cff(acit)[[1]] - s$`preferred-citation`$editors <- as_cff_person("A name") - - bib <- as_bibentry(s) - expect_snapshot(toBibtex(bib)) -}) - -test_that("From plain cff", { - expect_silent(bib <- as_bibentry(cff())) - expect_snapshot(toBibtex(bib)) - - - expect_snapshot(as_bibentry(cff(), what = "anda"), error = TRUE) -}) - -test_that("From file", { - file <- system.file("examples/CITATION_complete.cff", - package = "cffr" + direct <- bibentry( + number = 2, + bibtype = "Article", + key = "knuth:1984", + author = person("R Core Team"), + title = "Literate Programming", + journal = "The Computer Journal", + year = "1984", + # Optional + volume = "27", + pages = "97--111", + month = "January", + keywords = "Some, simple, keywords" ) - bib <- as_bibentry(file) - expect_snapshot(toBibtex(bib)) + expect_identical(with_number_first, direct) - expect_snapshot(as_bibentry("anunkonwpackage"), error = TRUE) + # No additional dots + expect_snapshot(s <- as_bibentry(a = 1)) }) +test_that("as_bibentry NULL", { + skip_on_cran() + current_dir <- getwd() -test_that("Test anonymous", { - bib <- bibentry("Booklet", - title = "A booklet" - ) - - - expect_silent(back <- as_bibentry(as_cff(bib))) - expect_snapshot(toBibtex(back)) + name <- paste0("mock-pack", runif(1) * 10) + new_dir <- file.path(tempdir(), name) + dir.create(new_dir, recursive = TRUE) - bib <- bibentry("manual", - title = "A manual" - ) - + expect_true(dir.exists(new_dir)) - expect_silent(back <- as_bibentry(as_cff(bib))) - expect_snapshot(toBibtex(back)) + setwd(new_dir) - bib <- bibentry("misc", - title = "A misc" + # Move files + file.copy(system.file("examples/DESCRIPTION_basic", package = "cffr"), + to = "DESCRIPTION" ) + # Get bibentry + a_bib <- as_bibentry() - expect_silent(back <- as_bibentry(as_cff(bib))) - expect_snapshot(toBibtex(back)) - bib <- bibentry("proceedings", - title = "proceedings", - year = 1984 - ) + # Revert to initial wd + setwd(current_dir) + expect_snapshot(toBibtex(a_bib)) - expect_silent(back <- as_bibentry(as_cff(bib))) - expect_snapshot(toBibtex(back)) + unlink(new_dir, recursive = TRUE, force = TRUE) }) -test_that("Fallback month", { - bib <- 
bibentry("Article", - title = "An Article", - author = "John Doe", - journal = "El Adelantado de Segovia", - year = "1678", - date = "1678-04-23" - ) - - expect_snapshot(toBibtex(bib)) - x <- as_cff(bib) - # Delete here the month - x$month <- NULL - bibback <- as_bibentry(x) - expect_snapshot(toBibtex(bibback)) -}) +test_that("as_bibentry character", { + skip_if_not_installed("rmarkdown") + base <- as_bibentry("rmarkdown") -test_that("Test Fallback year", { - x <- cff() + expect_s3_class(base, "bibentry") - expect_silent(msg <- as_bibentry(x)) + expect_length(base, 1) - expect_snapshot(toBibtex(msg)) + refs <- as_bibentry("rmarkdown", what = "references") + expect_s3_class(refs, "bibentry") + expect_gte(length(refs), 1) - x$`date-released` <- "2020-01-01" + all <- as_bibentry("rmarkdown", what = "all") + expect_s3_class(all, "bibentry") - expect_true(cff_validate(x, verbose = FALSE)) + expect_length(all, length(base) + length(refs)) - parsed <- as_bibentry(x) + # If an invented package + expect_snapshot(as_bibentry("invented_package"), error = TRUE) - expect_snapshot(toBibtex(parsed)) -}) + # If another kind of file + f <- system.file("examples/DESCRIPTION_basic", package = "cffr") -test_that("Errors", { - expect_silent(b <- as_bibentry("testthat")) - expect_s3_class(b, "bibentry") - expect_error(as_bibentry("testthat", what = "aa")) + expect_snapshot(as_bibentry(f)) }) -test_that("From package", { +test_that("as_bibentry character", { skip_if_not_installed("rmarkdown") base <- as_bibentry("rmarkdown") @@ -462,134 +144,33 @@ test_that("From package", { expect_s3_class(all, "bibentry") expect_length(all, length(base) + length(refs)) -}) -test_that("NULL references", { - basic <- cff() + # If an invented package + expect_snapshot(as_bibentry("invented_package"), error = TRUE) - expect_identical( - as_bibentry(basic, what = "references"), - bibentry() - ) + # If another kind of file + f <- system.file("examples/DESCRIPTION_basic", package = "cffr") - # Test all - expect_silent(l <- as_bibentry(basic, what = "all")) - expect_length(l, 1) + expect_snapshot(as_bibentry(f)) }) +test_that("as_bibentry cff", { + f <- system.file("examples/CITATION_basic.cff", package = "cffr") -test_that("From CITATION.cff", { - p <- system.file("examples/smith-et-al.cff", package = "cffr") + a_cff <- cff_read(f) + theref <- a_cff$references[[1]] + a_cff <- cff_modify(a_cff, references = NULL) + expect_null(a_cff$references) - base <- as_bibentry(p) + expect_snapshot(end <- as_bibentry(a_cff, what = "references")) + expect_identical(end, bibentry()) - expect_s3_class(base, "bibentry") - expect_length(base, 1) -}) - -test_that("Corrupt entry", { - bib <- bibentry("Article", - key = "knuth:1984", - author = person("R Core Team"), - title = "Literate Programming", - journal = "The Computer Journal", - year = "1984", - # Optional - volume = "27", - number = 2, - pages = "97--111", - month = "January", - keywords = "Some, simple, keywords" - ) - x <- as_cff(bib)[[1]] - x$year <- NULL - x$journal <- NULL - expect_snapshot(n <- as_bibentry(x)) - expect_identical(bibentry(), bibentry()) -}) - - -test_that("default", { - bib_coerced <- as_bibentry( - bibtype = "Article", - key = "knuth:1984", - author = person("R Core Team"), - title = "Literate Programming", - journal = "The Computer Journal", - year = "1984", - # Optional - volume = "27", - number = 2, - pages = "97--111", - month = "January", - keywords = "Some, simple, keywords" + expect_s3_class(theref, c("cff_ref", "cff"), exact = TRUE) + mod_ref <- cff_modify(theref, 
+ type = "manual", + authors = as_cff_person("{anonymous}") ) - direct <- bibentry( - bibtype = "Article", - key = "knuth:1984", - author = person("R Core Team"), - title = "Literate Programming", - journal = "The Computer Journal", - year = "1984", - # Optional - volume = "27", - number = 2, - pages = "97--111", - month = "January", - keywords = "Some, simple, keywords" - ) - expect_identical(bib_coerced, direct) - - with_number_first <- as_bibentry( - number = 2, - bibtype = "Article", - key = "knuth:1984", - author = person("R Core Team"), - title = "Literate Programming", - journal = "The Computer Journal", - year = "1984", - # Optional - volume = "27", - pages = "97--111", - month = "January", - keywords = "Some, simple, keywords" - ) - - direct <- bibentry( - number = 2, - bibtype = "Article", - key = "knuth:1984", - author = person("R Core Team"), - title = "Literate Programming", - journal = "The Computer Journal", - year = "1984", - # Optional - volume = "27", - pages = "97--111", - month = "January", - keywords = "Some, simple, keywords" - ) - - expect_identical(with_number_first, direct) - - with_number_first <- as_bibentry( - number = 2, - bibtype = "Article", - key = "knuth:1984", - author = person("R Core Team"), - title = "Literate Programming", - journal = "The Computer Journal", - year = "1984", - # Optional - volume = "27", - pages = "97--111", - month = "January", - keywords = "Some, simple, keywords" - ) - - - # No additional dots - expect_snapshot(s <- as_bibentry(a = 1)) + expect_snapshot(toBibtex(as_bibentry(mod_ref))) }) diff --git a/tests/testthat/test-as_cff_person.R b/tests/testthat/test-as_cff_person.R index 745ca610..f3183e6c 100644 --- a/tests/testthat/test-as_cff_person.R +++ b/tests/testthat/test-as_cff_person.R @@ -21,12 +21,12 @@ test_that("debugging messages", { options("cffr_message_verbosity" = initopt) }) -test_that("Parse one person", { +test_that("Coerce one person", { p <- person("one", "person") expect_snapshot(as_cff_person(p)) }) -test_that("Parse several persons", { +test_that("Coerce several persons", { p <- c( person("one", "person"), person("another", "human"), person("and one", "more") @@ -35,7 +35,7 @@ test_that("Parse several persons", { }) -test_that("Parse bibtex persons", { +test_that("Coerce bibtex persons", { s <- "Wright, III, Frank Edwin" expect_snapshot(as_cff_person(s)) @@ -49,7 +49,7 @@ test_that("Parse bibtex persons", { expect_identical(as_cff_person(s), as_cff_person(s2)) }) -test_that("Parse bibtex persons with masks", { +test_that("Coerce bibtex persons with masks", { s <- "Elephant and Castle" expect_snapshot(as_cff_person(s)) diff --git a/tests/testthat/test-as_cff_reference.R b/tests/testthat/test-as_cff_reference.R index 883d46e3..71b8dfb1 100644 --- a/tests/testthat/test-as_cff_reference.R +++ b/tests/testthat/test-as_cff_reference.R @@ -1,237 +1,4 @@ -test_that("Test citations with installed packages", { - installed <- as.character(installed.packages()[, 1]) - inst <- c("base", "jsonlite", "rmarkdown") - for (i in seq_len(length(inst))) { - if (inst[i] %in% installed) { - desc <- cff_create(inst[i]) - expect_true(length(desc$`preferred-citation`) > 1) - expect_true(cff_validate(desc, verbose = FALSE)) - } - } -}) - -test_that("Test full with CITATION and (option = author)", { - # Needs an installed package - desc_path <- system.file("examples/DESCRIPTION_rgeos", package = "cffr") - cit_path <- system.file("examples/CITATION_auto", package = "cffr") - parsed <- cff_safe_read_citation(desc_path, cit_path) - 
expect_s3_class(parsed, "cff") - - # Create cff - cffobj <- cff_create(desc_path, keys = list( - references = parsed - )) - - expect_s3_class(cffobj, "cff") - expect_snapshot(cffobj) - expect_true(cff_validate(cffobj, verbose = FALSE)) -}) - - -test_that("Parsed several citations", { - # Needs an installed package - desc_path <- system.file("examples/DESCRIPTION_rgeos", package = "cffr") - cit_path <- system.file("examples/CITATION_auto", package = "cffr") - citobj <- cff_safe_read_citation(desc_path, cit_path) - expect_s3_class(citobj, c("cff_ref_list", "cff"), exact = TRUE) - - expect_snapshot(citobj) - expect_length(citobj, 3) -}) - - -test_that("Add wrong field to citation", { - bib <- bibentry( - bibtype = "Manual", - title = "favoritefood is not valid on cff schema", - author = "Jane Smith", - favoritefood = "bananas", - type = "I should be removed" - ) - - bibparsed <- as_cff(bib) - - expect_s3_class(bibparsed, "cff") - - cffobj <- cff_create(cff(), - keys = list( - references = bibparsed - ) - ) - - expect_snapshot(cffobj) - expect_true(cff_validate(cffobj, verbose = FALSE)) -}) - -test_that("Fix wrong orcid", { - bib <- bibentry( - bibtype = "Manual", - title = "Wrong orcid fixed by cffr", - author = person("Jane", - "Smith", - comment = c( - ORCID = - "http://orcid.org/0000-0000-0000-306X" - ) - ) - ) - - bibparsed <- as_cff(bib) - - expect_s3_class(bibparsed, "cff") - - cffobj <- cff_create(cff(), - keys = list(references = bibparsed) - ) - - expect_snapshot(cffobj) - expect_true(cff_validate(cffobj, verbose = FALSE)) -}) - -test_that("Several identifiers and duplicates", { - bib <- bibentry( - bibtype = "Manual", - title = "A Language and Environment for Statistical Computing", - year = "2022", - year = "2023", - author = person("R Core Team"), - version = NULL, - error = "", - url = "https://www.R-project.org/", - url = "https://google.com/", - doi = "10.5281/zenodo.5366600", - doi = "10.5281/zenodo.5366601", - doi = "10.5281/zenodo.5366602", - identifiers = "a,b" - ) - - bibparsed <- as_cff(bib) - - expect_s3_class(bibparsed, "cff") - - cffobj <- cff_create(cff(), - keys = list( - references = bibparsed - ) - ) - - expect_snapshot(cffobj) - expect_true(cff_validate(cffobj, verbose = FALSE)) -}) - -test_that("Test keywords and urls", { - bib <- bibentry( - bibtype = "Manual", - title = "A Language and Environment for Statistical Computing", - year = "2022", - author = person("R Core Team"), - url = "https://www.R-project.org/", - url = "https://google.com/", - keywords = "Some, random keywords, in, here, here" - ) - - bibparsed <- as_cff(bib) - - expect_s3_class(bibparsed, "cff") - - cffobj <- cff_create(cff(), - keys = list( - references = bibparsed - ) - ) - - expect_snapshot(cffobj) - expect_true(cff_validate(cffobj, verbose = FALSE)) -}) - -test_that("Parse persons on CITATION", { - bib <- bibentry("Manual", - title = "A Language and Environment for Statistical Computing", - year = "2021", - author = person("R Core Team"), - contact = "A name and A contact", - conference = person("A", "conference"), - "database-provider" = person("Database", "provider"), - editors = "A editor and {Ben and Jerry}", - "editors-series" = "An {editor series} and Another", - "institution" = person("A", "institution"), - "address" = person("A", "location"), - "publisher" = person("A", "publisher"), - "recipients" = "A recipient", - "senders" = "{A Sender} and Another Sender", - "translators" = "Translator one and Translator two" - ) - - bibparsed <- as_cff(bib) - expect_snapshot(bibparsed) - - 
cffobj <- cff_create(cff(), - keys = list(references = bibparsed) - ) - - expect_true(cff_validate(cffobj, verbose = FALSE)) -}) - -test_that("Test inputs", { - # Remove type - - bib <- bibentry("Book", - title = "Test", - author = "Billy Jean", - year = "2021", - publisher = "Random House", - type = "RANDOM" - ) - - - bibparsed <- as_cff(bib) - expect_snapshot(bibparsed) - - cffobj <- cff_create(cff(), - keys = list(references = bibparsed) - ) - - expect_true(cff_validate(cffobj, verbose = FALSE)) -}) - -test_that("Fallback date", { - bib <- bibentry("Misc", - title = "Test", - author = "Billy Jean", - date = "2050-01-12", - urldate = "2099-02-02", - publisher = "Random House", - type = "RANDOM" - ) - - init_cff <- as_cff(bib) - - expect_snapshot(init_cff) -}) - -test_that("Duplicates", { - bib <- bibentry("Misc", - title = "Test", - author = "Billy Jean", - date = "2050-01-12", - urldate = "2099-02-02", - publisher = "Random House", - type = "RANDOM" - ) - bib2 <- bibentry("Manual", - title = "Test", - author = "Billy Jean", - date = "2050-01-12", - urldate = "2099-02-02", - publisher = "Random House", - type = "RANDOM" - ) - bibend <- c(rep(bib, 3), bib2) - expect_snapshot(uniq <- as_cff(bibend)) - expect_length(uniq, 2) -}) - -# Parse citation from BibTeX ---- +# Coerce citation from BibTeX ---- test_that("Article", { bib <- bibentry("Article", @@ -248,17 +15,17 @@ test_that("Article", { note = "Example modified for testing purposes" ) - bibparsed <- as_cff(bib) - expect_snapshot(bibparsed) + bib_cff <- as_cff(bib) + expect_snapshot(bib_cff) cffobj <- cff_create(cff(), - keys = list(references = bibparsed) + keys = list(references = bib_cff) ) expect_true(cff_validate(cffobj, verbose = FALSE)) # Back to bibtex and check names - tobib <- as_bibentry(bibparsed) + tobib <- as_bibentry(bib_cff) fld1 <- sort(names(unclass(bib)[[1]])) fld2 <- sort(names(unclass(tobib)[[1]])) @@ -287,17 +54,17 @@ test_that("Book", { keywords = c("Two, keyword") ) - bibparsed <- as_cff(bib) - expect_snapshot(bibparsed) + bib_cff <- as_cff(bib) + expect_snapshot(bib_cff) cffobj <- cff_create(cff(), - keys = list(references = bibparsed) + keys = list(references = bib_cff) ) expect_true(cff_validate(cffobj, verbose = FALSE)) # Back to bibtex and check names - tobib <- as_bibentry(bibparsed) + tobib <- as_bibentry(bib_cff) fld1 <- sort(names(unclass(bib)[[1]])) fld2 <- sort(names(unclass(tobib)[[1]])) @@ -319,22 +86,22 @@ test_that("Booklet", { keywords = "java" ) - bibparsed <- as_cff(bib) - expect_snapshot(bibparsed) + bib_cff <- as_cff(bib) + expect_snapshot(bib_cff) cffobj <- cff_create(cff(), - keys = list(references = bibparsed) + keys = list(references = bib_cff) ) expect_true(cff_validate(cffobj, verbose = FALSE)) # Back to bibtex and check names - tobib <- as_bibentry(bibparsed) + tobib <- as_bibentry(bib_cff) fld1 <- sort(names(unclass(bib)[[1]])) fld2 <- sort(names(unclass(tobib)[[1]])) - # Keyword is not parsed + # Keyword is not coerced expect_identical(setdiff(fld1, fld2), "keywords") }) @@ -369,18 +136,18 @@ test_that("Conference", { bib <- list(bib_un) class(bib) <- "bibentry" - bibparsed <- as_cff(bib) + bib_cff <- as_cff(bib) - expect_snapshot(bibparsed) + expect_snapshot(bib_cff) cffobj <- cff_create(cff(), - keys = list(references = bibparsed) + keys = list(references = bib_cff) ) expect_true(cff_validate(cffobj, verbose = FALSE)) # Back to bibtex and check names - tobib <- as_bibentry(bibparsed) + tobib <- as_bibentry(bib_cff) fld1 <- unique(sort(names(unclass(bib)[[1]]))) fld2 <- 
unique(sort(names(unclass(tobib)[[1]]))) @@ -409,16 +176,16 @@ test_that("InBook", { note = "Example modified for testing purposes" ) - bibparsed <- as_cff(bib) - expect_snapshot(bibparsed) + bib_cff <- as_cff(bib) + expect_snapshot(bib_cff) cffobj <- cff_create(cff(), - keys = list(references = bibparsed) + keys = list(references = bib_cff) ) expect_true(cff_validate(cffobj, verbose = FALSE)) # Back to bibtex and check names - tobib <- as_bibentry(bibparsed) + tobib <- as_bibentry(bib_cff) fld1 <- unique(sort(names(unclass(bib)[[1]]))) fld2 <- unique(sort(names(unclass(tobib)[[1]]))) @@ -449,17 +216,17 @@ test_that("InCollection", { note = "A note" ) - bibparsed <- as_cff(bib) - expect_snapshot(bibparsed) + bib_cff <- as_cff(bib) + expect_snapshot(bib_cff) cffobj <- cff_create(cff(), - keys = list(references = bibparsed) + keys = list(references = bib_cff) ) expect_true(cff_validate(cffobj, verbose = FALSE)) # Back to bibtex and check names - tobib <- as_bibentry(bibparsed) + tobib <- as_bibentry(bib_cff) fld1 <- unique(sort(names(unclass(bib)[[1]]))) fld2 <- unique(sort(names(unclass(tobib)[[1]]))) @@ -490,22 +257,28 @@ test_that("InProceedings", { note = "Example modified for testing purposes" ) - bibparsed <- as_cff(bib) - expect_snapshot(bibparsed) + bib_cff <- as_cff(bib) + expect_snapshot(bib_cff) cffobj <- cff_create(cff(), - keys = list(references = bibparsed) + keys = list(references = bib_cff) ) expect_true(cff_validate(cffobj, verbose = FALSE)) # Back to bibtex and check names - tobib <- as_bibentry(bibparsed) + tobib <- as_bibentry(bib_cff) fld1 <- unique(sort(names(unclass(bib)[[1]]))) fld2 <- unique(sort(names(unclass(tobib)[[1]]))) expect_identical(setdiff(fld1, fld2), "series") + + # If we remove collection title use conference + bib_cff[[1]]$`collection-title` <- NULL + bib_cff[[1]]$conference$name <- "I Am a conference" + bib <- as_bibentry(bib_cff) + expect_snapshot(toBibtex(bib)) }) test_that("Manual", { @@ -521,17 +294,17 @@ test_that("Manual", { note = "Example modified for testing purposes" ) - bibparsed <- as_cff(bib) - expect_snapshot(bibparsed) + bib_cff <- as_cff(bib) + expect_snapshot(bib_cff) cffobj <- cff_create(cff(), - keys = list(references = bibparsed) + keys = list(references = bib_cff) ) expect_true(cff_validate(cffobj, verbose = FALSE)) # Back to bibtex and check names - tobib <- as_bibentry(bibparsed) + tobib <- as_bibentry(bib_cff) fld1 <- unique(sort(names(unclass(bib)[[1]]))) fld2 <- unique(sort(names(unclass(tobib)[[1]]))) @@ -553,17 +326,17 @@ test_that("MastersThesis", { month = "August", note = "Example modified for testing purposes" ) - bibparsed <- as_cff(bib) - expect_snapshot(bibparsed) + bib_cff <- as_cff(bib) + expect_snapshot(bib_cff) cffobj <- cff_create(cff(), - keys = list(references = bibparsed) + keys = list(references = bib_cff) ) expect_true(cff_validate(cffobj, verbose = FALSE)) # Back to bibtex and check names - tobib <- as_bibentry(bibparsed) + tobib <- as_bibentry(bib_cff) fld1 <- unique(sort(names(unclass(bib)[[1]]))) fld2 <- unique(sort(names(unclass(tobib)[[1]]))) @@ -582,17 +355,17 @@ test_that("Misc", { note = "A note" ) - bibparsed <- as_cff(bib) - expect_snapshot(bibparsed) + bib_cff <- as_cff(bib) + expect_snapshot(bib_cff) cffobj <- cff_create(cff(), - keys = list(references = bibparsed) + keys = list(references = bib_cff) ) expect_true(cff_validate(cffobj, verbose = FALSE)) # Back to bibtex and check names - tobib <- as_bibentry(bibparsed) + tobib <- as_bibentry(bib_cff) fld1 <- unique(sort(names(unclass(bib)[[1]]))) 
fld2 <- unique(sort(names(unclass(tobib)[[1]]))) @@ -615,17 +388,17 @@ test_that("PhdThesis", { note = "Example modified for testing purposes" ) - bibparsed <- as_cff(bib) - expect_snapshot(bibparsed) + bib_cff <- as_cff(bib) + expect_snapshot(bib_cff) cffobj <- cff_create(cff(), - keys = list(references = bibparsed) + keys = list(references = bib_cff) ) expect_true(cff_validate(cffobj, verbose = FALSE)) # Back to bibtex and check names - tobib <- as_bibentry(bibparsed) + tobib <- as_bibentry(bib_cff) fld1 <- unique(sort(names(unclass(bib)[[1]]))) fld2 <- unique(sort(names(unclass(tobib)[[1]]))) @@ -650,17 +423,17 @@ test_that("Proceedings", { note = "Example modified for testing purposes" ) - bibparsed <- as_cff(bib) - expect_snapshot(bibparsed) + bib_cff <- as_cff(bib) + expect_snapshot(bib_cff) cffobj <- cff_create(cff(), - keys = list(references = bibparsed) + keys = list(references = bib_cff) ) expect_true(cff_validate(cffobj, verbose = FALSE)) # Back to bibtex and check names - tobib <- as_bibentry(bibparsed) + tobib <- as_bibentry(bib_cff) fld1 <- unique(sort(names(unclass(bib)[[1]]))) fld2 <- unique(sort(names(unclass(tobib)[[1]]))) @@ -685,17 +458,17 @@ test_that("TechReport", { note = "Example modified for testing purposes" ) - bibparsed <- as_cff(bib) - expect_snapshot(bibparsed) + bib_cff <- as_cff(bib) + expect_snapshot(bib_cff) cffobj <- cff_create(cff(), - keys = list(references = bibparsed) + keys = list(references = bib_cff) ) expect_true(cff_validate(cffobj, verbose = FALSE)) # Back to bibtex and check names - tobib <- as_bibentry(bibparsed) + tobib <- as_bibentry(bib_cff) fld1 <- unique(sort(names(unclass(bib)[[1]]))) fld2 <- unique(sort(names(unclass(tobib)[[1]]))) @@ -713,22 +486,27 @@ test_that("Unpublished", { month = "aug", ) - bibparsed <- as_cff(bib) - expect_snapshot(bibparsed) + bib_cff <- as_cff(bib) + expect_snapshot(bib_cff) cffobj <- cff_create(cff(), - keys = list(references = bibparsed) + keys = list(references = bib_cff) ) expect_true(cff_validate(cffobj, verbose = FALSE)) # Back to bibtex and check names - tobib <- as_bibentry(bibparsed) + tobib <- as_bibentry(bib_cff) fld1 <- unique(sort(names(unclass(bib)[[1]]))) fld2 <- unique(sort(names(unclass(tobib)[[1]]))) expect_identical(fld1, fld2) + + # With custom note + bib_cff[[1]]$notes <- NULL + bib <- as_bibentry(bib_cff) + expect_snapshot(toBibtex(bib)) }) test_that("InBook with booktitle", { @@ -745,22 +523,22 @@ test_that("InBook with booktitle", { chapter = "4.5" ) - bibparsed <- as_cff(bib) - expect_snapshot(bibparsed) + bib_cff <- as_cff(bib) + expect_snapshot(bib_cff) cffobj <- cff_create(cff(), - keys = list(references = bibparsed) + keys = list(references = bib_cff) ) expect_true(cff_validate(cffobj, verbose = FALSE)) # Should be an incollection now - res <- as_bibentry(bibparsed) + res <- as_bibentry(bib_cff) init_type <- attr(unclass(res)[[1]], "bibtype") expect_identical(tolower(init_type), "incollection") # Back to bibtex and check names - tobib <- as_bibentry(bibparsed) + tobib <- as_bibentry(bib_cff) fld1 <- unique(sort(names(unclass(bib)[[1]]))) fld2 <- unique(sort(names(unclass(tobib)[[1]]))) @@ -781,17 +559,17 @@ test_that("Test entry without author", { isbn = "1-59593-322-02", ) - bibparsed <- as_cff(bib) + bib_cff <- as_cff(bib) expect_identical( - bibparsed[[1]]$authors[[1]]$name, + bib_cff[[1]]$authors[[1]]$name, "anonymous" ) - expect_snapshot(bibparsed) + expect_snapshot(bib_cff) cffobj <- cff_create(cff(), - keys = list(references = bibparsed) + keys = list(references = bib_cff) 
) expect_true(cff_validate(cffobj, verbose = FALSE)) @@ -813,18 +591,18 @@ test_that("Test entry without author but has a key", { isbn = "1-59593-322-02", ) - bibparsed <- as_cff(bib) + bib_cff <- as_cff(bib) expect_identical( - bibparsed[[1]]$authors[[1]]$name, + bib_cff[[1]]$authors[[1]]$name, "anonymous" ) - expect_snapshot(bibparsed) + expect_snapshot(bib_cff) cffobj <- cff_create(cff(), - keys = list(references = bibparsed) + keys = list(references = bib_cff) ) expect_true(cff_validate(cffobj, verbose = FALSE)) @@ -843,18 +621,18 @@ test_that("Test entry without author and key", { isbn = "1-59593-322-02", ) - bibparsed <- as_cff(bib) + bib_cff <- as_cff(bib) expect_identical( - bibparsed[[1]]$authors[[1]]$name, + bib_cff[[1]]$authors[[1]]$name, "anonymous" ) - expect_snapshot(bibparsed) + expect_snapshot(bib_cff) cffobj <- cff_create(cff(), - keys = list(references = bibparsed) + keys = list(references = bib_cff) ) expect_true(cff_validate(cffobj, verbose = FALSE)) @@ -869,12 +647,12 @@ test_that("Skip misc without title", { year = 2018 ) - expect_message(bibparsed <- as_cff(bib), "Skipping") + expect_message(bib_cff <- as_cff(bib), "Skipping") - expect_null(bibparsed) + expect_null(bib_cff) cffobj <- cff_create(cff(), - keys = list(references = bibparsed) + keys = list(references = bib_cff) ) expect_snapshot(cffobj) @@ -901,14 +679,14 @@ test_that("Skip misc without title, not skipping the good one", { - expect_message(bibparsed <- as_cff(bib), "SHERPA/RoMEO") + expect_message(bib_cff <- as_cff(bib), "SHERPA/RoMEO") - expect_length(bibparsed, 1) + expect_length(bib_cff, 1) - expect_s3_class(bibparsed[[1]], "cff") + expect_s3_class(bib_cff[[1]], "cff") cffobj <- cff_create(cff(), - keys = list(references = bibparsed) + keys = list(references = bib_cff) ) expect_snapshot(cffobj) @@ -948,12 +726,60 @@ test_that("Check extended BibLatex Fields", { url = "http://www.ctan.org" ) - bibparsed <- as_cff(bib) - expect_snapshot(bibparsed) + bib_cff <- as_cff(bib) + expect_snapshot(bib_cff) cffobj <- cff_create(cff(), - keys = list(references = bibparsed) + keys = list(references = bib_cff) + ) + + expect_true(cff_validate(cffobj, verbose = FALSE)) +}) + +test_that("Duplicate entries", { + bib <- bibentry("InBook", + title = "Bibliographies and citations", + year = "2020", + author = "Yihui Xie and Christophe Dervieux and Emily Riederer", + booktitle = "{R} Markdown Cookbook", + publisher = "Chapman and Hall/CRC", + address = "Boca Raton, Florida", + series = "The {R} Series", + isbn = "9780367563837", + url = "https://bookdown.org/yihui/rmarkdown-cookbook", + chapter = "4.5" + ) + + expect_snapshot(bib_cff <- as_cff(rep(bib, 2))) + expect_length(bib_cff, 1) +}) + +test_that("Identifiers and dois", { + bib <- bibentry( + bibtype = "Manual", + title = "A Language and Environment for Statistical Computing", + year = "2022", + year = "2023", + author = person("R Core Team"), + version = NULL, + error = "", + url = "https://www.R-project.org/", + url = "https://google.com/", + doi = "10.5281/zenodo.5366600", + doi = "10.5281/zenodo.5366601", + doi = "10.5281/zenodo.5366602", + identifiers = "a,b" ) + bib_cff <- as_cff(bib) + sin <- bib_cff[[1]] + checf <- sin[names(sin) %in% c("url", "doi", "identifiers")] + expect_snapshot(as_cff(checf)) + + + cffobj <- cff_modify(cff(), references = bib_cff) + + + expect_snapshot(cffobj) expect_true(cff_validate(cffobj, verbose = FALSE)) }) diff --git a/tests/testthat/test-assertions.R b/tests/testthat/test-assertions.R index cd96eabe..ac916977 100644 --- 
a/tests/testthat/test-assertions.R +++ b/tests/testthat/test-assertions.R @@ -41,6 +41,9 @@ test_that("Check is_url", { # invalid addresses expect_false( any( + is_url(NULL), + is_url(NA), + is_url(""), is_url("https:/github.com/dieghernan"), is_url("http:/github.com/dieghernan"), is_url("ftp:/github.com/dieghernan"), @@ -51,24 +54,71 @@ test_that("Check is_url", { }) -test_that("Check stop if not cff", { - obj <- cff() +test_that("Check is_substring", { + expect_true( + all( + is_substring("amanda", "a"), + is_substring("amanda erele", "e") + ) + ) + # invalid addresses + expect_false( + any( + is_substring(NULL, "a"), + is_substring("amanda", "e"), + is_substring("amanda", "A") + ) + ) +}) - expect_silent(stopifnotcff(obj)) +test_that("Check cff", { + a_pers <- as_cff_person("Barnes and Noble") + sing <- a_pers[[1]] + expect_s3_class(sing, "cff_pers") - file <- system.file("examples/CITATION_skeleton.cff", - package = "cffr" + a_bib <- as_cff(c( + bibentry("misc", title = "a"), + bibentry("misc", title = "b") + )) + sing2 <- a_bib[[1]] + expect_s3_class(sing2, "cff_ref") + expect_true( + all( + is_cff(cff()), + is_cff(a_bib), + is_cff(a_pers), + is_cff(sing), + is_cff(sing2) + ) + ) + # invalid + expect_false( + any( + is_cff("amanda"), + is_cff(list(a = 1)) + ) ) +}) - expect_silent(stopifnotcff(file)) - # Error +test_that("Check is cff file", { + expect_true(is_cff_file("CIt_aT.cff")) + expect_false(is_cff_file("CIt_aT_cff")) +}) - err <- "Some string" +test_that("is named", { + a_n <- c("a" = 1, b = "2") + expect_true(is_named(a_n)) + expect_true(is_named(as.list(a_n))) - expect_error(stopifnotcff(err)) + expect_false(is_named(LETTERS)) + expect_false(is_named(as.list(LETTERS))) +}) - # Other objects - x <- list(a = 1) - expect_error(stopifnotcff(x), "list") +test_that("is github", { + x <- list("repository-code" = "https://github.com/") + x2 <- x + x2$`repository-code` <- "gogle.com" + expect_true(is_github(x)) + expect_false(is_github(x2)) }) diff --git a/tests/testthat/test-cff.R b/tests/testthat/test-cff.R index 02a12d64..597e26cf 100644 --- a/tests/testthat/test-cff.R +++ b/tests/testthat/test-cff.R @@ -34,8 +34,7 @@ test_that("Walk trough full lifecycle", { # Write tmp <- tempfile(fileext = ".cff") cff_write(modify, outfile = tmp, validate = FALSE, verbose = FALSE) - stopifnotexists(tmp) - stopifnotcff(tmp) + expect_silent(file_exist_abort(tmp)) # Validate expect_true(cff_validate(tmp, verbose = FALSE)) diff --git a/tests/testthat/test-cff_create.R b/tests/testthat/test-cff_create.R index 4f4d693d..39435286 100644 --- a/tests/testthat/test-cff_create.R +++ b/tests/testthat/test-cff_create.R @@ -10,6 +10,35 @@ test_that("Test installed packages", { expect_silent(cff_create("yaml")) }) +test_that("Test indev", { + skip_on_cran() + current_dir <- getwd() + + name <- paste0("mock-pack", runif(1) * 10) + new_dir <- file.path(tempdir(), name) + + dir.create(new_dir, recursive = TRUE) + + expect_true(dir.exists(new_dir)) + + setwd(new_dir) + + # Move files + file.copy(system.file("examples/DESCRIPTION_basic", package = "cffr"), + to = "DESCRIPTION" + ) + + a_cff <- cff_create() + + expect_true(cff_validate(a_cff, verbose = FALSE)) + # Revert to initial wd + setwd(current_dir) + + expect_snapshot(a_cff) + + unlink(new_dir, recursive = TRUE, force = TRUE) +}) + test_that("Test dependencies extraction", { yes <- cff_create("jsonlite") no <- cff_create("jsonlite", dependencies = FALSE) @@ -163,80 +192,80 @@ test_that("Add new roles on write", { }) # Check DESCRIPTION ---- -test_that("Parse 
date", { +test_that("Coerce date", { desc_path <- system.file("examples/DESCRIPTION_rgeos", package = "cffr") - parsed <- cff_create(desc_path, + a_cff <- cff_create(desc_path, gh_keywords = FALSE, keys = list(references = NULL) ) - expect_false(is.null(parsed$`date-released`)) + expect_false(is.null(a_cff$`date-released`)) - expect_s3_class(parsed, "cff") - expect_snapshot(parsed) - expect_true(cff_validate(parsed, verbose = FALSE)) + expect_s3_class(a_cff, "cff") + expect_snapshot(a_cff) + expect_true(cff_validate(a_cff, verbose = FALSE)) }) -test_that("Parse date in another format", { +test_that("Coerce date in another format", { desc_path <- system.file("examples/DESCRIPTION_basicdate", package = "cffr") - parsed <- cff_create(desc_path, + a_cff <- cff_create(desc_path, gh_keywords = FALSE, keys = list(references = NULL) ) - expect_false(is.null(parsed$`date-released`)) + expect_false(is.null(a_cff$`date-released`)) - expect_s3_class(parsed, "cff") - expect_snapshot(parsed) - expect_true(cff_validate(parsed, verbose = FALSE)) + expect_s3_class(a_cff, "cff") + expect_snapshot(a_cff) + expect_true(cff_validate(a_cff, verbose = FALSE)) }) -test_that("No date parsed in DESCRIPTION without it", { +test_that("No date coerced in DESCRIPTION without it", { desc_path <- system.file("examples/DESCRIPTION_basic", package = "cffr") - parsed <- cff_create(desc_path, + a_cff <- cff_create(desc_path, keys = list(references = NULL) ) - expect_true(is.null(parsed$`date-released`)) + expect_true(is.null(a_cff$`date-released`)) - expect_s3_class(parsed, "cff") - expect_true(cff_validate(parsed, verbose = FALSE)) + expect_s3_class(a_cff, "cff") + expect_true(cff_validate(a_cff, verbose = FALSE)) }) test_that("Parsing many urls", { desc_path <- system.file("examples/DESCRIPTION_many_urls", package = "cffr") - parsed <- cff_create(desc_path, + a_cff <- cff_create(desc_path, gh_keywords = FALSE, keys = list(references = NULL) ) - expect_length(parsed$`repository-code`, 1) - expect_length(parsed$url, 1) - expect_length(parsed$identifiers, 3) - expect_s3_class(parsed, "cff") - expect_snapshot(parsed) - expect_true(cff_validate(parsed, verbose = FALSE)) + expect_length(a_cff$`repository-code`, 1) + expect_length(a_cff$url, 1) + expect_length(a_cff$identifiers, 3) + expect_s3_class(a_cff, "cff") + expect_snapshot(a_cff) + expect_true(cff_validate(a_cff, verbose = FALSE)) }) test_that("Parsing Gitlab", { desc_path <- system.file("examples/DESCRIPTION_gitlab", package = "cffr") - parsed <- cff_create(desc_path, + a_cff <- cff_create(desc_path, keys = list(references = NULL) ) - expect_length(parsed$`repository-code`, 1) - expect_length(parsed$url, 1) - expect_length(parsed$identifiers, 0) - expect_s3_class(parsed, "cff") - expect_snapshot(parsed) - expect_true(cff_validate(parsed, verbose = FALSE)) + expect_length(a_cff$`repository-code`, 1) + expect_length(a_cff$url, 1) + expect_length(a_cff$identifiers, 0) + expect_s3_class(a_cff, "cff") + expect_snapshot(a_cff) + expect_true(cff_validate(a_cff, verbose = FALSE)) }) test_that("Parsing many persons", { @@ -244,42 +273,42 @@ test_that("Parsing many persons", { package = "cffr" ) - parsed <- cff_create(desc_path, + a_cff <- cff_create(desc_path, gh_keywords = FALSE, keys = list(references = NULL) ) - expect_length(parsed$authors, 4) + expect_length(a_cff$authors, 4) - authors <- unlist(parsed$authors) + authors <- unlist(a_cff$authors) expect_length(grep("erro", authors), 0) - names <- unlist(lapply(parsed$authors, names)) + names <- unlist(lapply(a_cff$authors, 
names)) - expect_s3_class(parsed, "cff") - expect_snapshot(parsed) - expect_true(cff_validate(parsed, verbose = FALSE)) + expect_s3_class(a_cff, "cff") + expect_snapshot(a_cff) + expect_true(cff_validate(a_cff, verbose = FALSE)) }) test_that("Parsing wrong urls", { desc_path <- system.file("examples/DESCRIPTION_wrong_urls", package = "cffr") - parsed <- cff_create(desc_path, + a_cff <- cff_create(desc_path, gh_keywords = FALSE, keys = list(references = NULL) ) - expect_null(parsed$`repository-code`) - expect_length(parsed$url, 1) - expect_length(parsed$identifiers, 2) + expect_null(a_cff$`repository-code`) + expect_length(a_cff$url, 1) + expect_length(a_cff$identifiers, 2) - expect_s3_class(parsed, "cff") - expect_snapshot(parsed) - expect_true(cff_validate(parsed, verbose = FALSE)) + expect_s3_class(a_cff, "cff") + expect_snapshot(a_cff) + expect_true(cff_validate(a_cff, verbose = FALSE)) }) @@ -289,17 +318,17 @@ test_that("Parsing two maintainers", { package = "cffr" ) - parsed <- cff_create(desc_path, + a_cff <- cff_create(desc_path, gh_keywords = FALSE, keys = list(references = NULL) ) - expect_length(parsed$authors, 3) - expect_length(parsed$contact, 2) + expect_length(a_cff$authors, 3) + expect_length(a_cff$contact, 2) - expect_s3_class(parsed, "cff") - expect_snapshot(parsed) - expect_true(cff_validate(parsed, verbose = FALSE)) + expect_s3_class(a_cff, "cff") + expect_snapshot(a_cff) + expect_true(cff_validate(a_cff, verbose = FALSE)) }) test_that("Parsing r-universe", { @@ -307,16 +336,16 @@ test_that("Parsing r-universe", { package = "cffr" ) - parsed <- cff_create(desc_path, + a_cff <- cff_create(desc_path, gh_keywords = FALSE, keys = list(references = NULL) ) - expect_length(parsed$repository, 1) + expect_length(a_cff$repository, 1) - expect_s3_class(parsed, "cff") - expect_snapshot(parsed) - expect_true(cff_validate(parsed, verbose = FALSE)) + expect_s3_class(a_cff, "cff") + expect_snapshot(a_cff) + expect_true(cff_validate(a_cff, verbose = FALSE)) }) @@ -325,16 +354,16 @@ test_that("Parsing Bioconductor", { package = "cffr" ) - parsed <- cff_create(desc_path, + a_cff <- cff_create(desc_path, gh_keywords = FALSE, keys = list(references = NULL) ) - expect_length(parsed$repository, 1) + expect_length(a_cff$repository, 1) - expect_s3_class(parsed, "cff") - expect_snapshot(parsed) - expect_true(cff_validate(parsed, verbose = FALSE)) + expect_s3_class(a_cff, "cff") + expect_snapshot(a_cff) + expect_true(cff_validate(a_cff, verbose = FALSE)) }) test_that("Parsing Posit Package Manager", { @@ -342,19 +371,19 @@ test_that("Parsing Posit Package Manager", { package = "cffr" ) - parsed <- cff_create(desc_path, + a_cff <- cff_create(desc_path, gh_keywords = FALSE, keys = list(references = NULL) ) - expect_length(parsed$repository, 1) + expect_length(a_cff$repository, 1) expect_identical( - parsed$repository, + a_cff$repository, "https://CRAN.R-project.org/package=resmush" ) - expect_s3_class(parsed, "cff") - expect_snapshot(parsed) - expect_true(cff_validate(parsed, verbose = FALSE)) + expect_s3_class(a_cff, "cff") + expect_snapshot(a_cff) + expect_true(cff_validate(a_cff, verbose = FALSE)) }) test_that("Search package on CRAN", { @@ -368,16 +397,16 @@ test_that("Search package on CRAN", { newfile <- desc::desc_set("Package", "ggplot2", file = tmp) - parsed <- cff_create(tmp, gh_keywords = FALSE) - expect_length(parsed$repository, 1) + a_cff <- cff_create(tmp, gh_keywords = FALSE) + expect_length(a_cff$repository, 1) expect_equal(clean_str(newfile$get("Package")), "ggplot2") - 
expect_equal(parsed$repository, "https://CRAN.R-project.org/package=ggplot2") + expect_equal(a_cff$repository, "https://CRAN.R-project.org/package=ggplot2") - expect_s3_class(parsed, "cff") - expect_snapshot(parsed) - expect_true(cff_validate(parsed, verbose = FALSE)) + expect_s3_class(a_cff, "cff") + expect_snapshot(a_cff) + expect_true(cff_validate(a_cff, verbose = FALSE)) }) @@ -479,7 +508,7 @@ test_that("Validate keywords", { }) -test_that("Parse keywords from GH", { +test_that("Coerce keywords from GH", { skip_on_cran() skip_if_offline() skip_if( diff --git a/tests/testthat/test-cff_gha_update.R b/tests/testthat/test-cff_gha_update.R new file mode 100644 index 00000000..d6716234 --- /dev/null +++ b/tests/testthat/test-cff_gha_update.R @@ -0,0 +1,41 @@ +test_that("Test in mock package", { + skip_on_cran() + current_dir <- getwd() + + name <- paste0("mock-pack", runif(1) * 10) + new_dir <- file.path(tempdir(), name) + + dir.create(new_dir, recursive = TRUE) + + expect_true(dir.exists(new_dir)) + + setwd(new_dir) + + # Move files + file.copy(system.file("examples/DESCRIPTION_many_urls", package = "cffr"), + to = "DESCRIPTION" + ) + + + # Create Rbuildignore + file.create(".Rbuildignore", showWarnings = FALSE) + expect_true(file_exist_abort(".Rbuildignore")) + + # Add action + expect_snapshot(cff_gha_update()) + + expect_snapshot(cff_gha_update()) + expect_snapshot(cff_gha_update(overwrite = TRUE)) + + expect_true(file_exist_abort(file.path( + ".github", + "workflows", + "update-citation-cff.yaml" + ))) + + + # Revert to initial wd + setwd(current_dir) + + unlink(new_dir, recursive = TRUE, force = TRUE) +}) diff --git a/tests/testthat/test-cff_read.R b/tests/testthat/test-cff_read.R index b3ffea4a..753d7de7 100644 --- a/tests/testthat/test-cff_read.R +++ b/tests/testthat/test-cff_read.R @@ -108,9 +108,9 @@ test_that("cff_read CITATION_basic", { my_meta <- desc_to_meta(a_desc) path <- system.file("examples/CITATION_basic", package = "cffr") - parsed <- cff_read(path, my_meta) - expect_s3_class(parsed, c("cff_ref_list", "cff"), exact = TRUE) - expect_equal(length(parsed), 2) + a_cff <- cff_read(path, my_meta) + expect_s3_class(a_cff, c("cff_ref_list", "cff"), exact = TRUE) + expect_equal(length(a_cff), 2) }) test_that("cff_read CITATION with no encoding", { @@ -118,8 +118,8 @@ test_that("cff_read CITATION with no encoding", { cit_path <- system.file("examples/CITATION_basic", package = "cffr") my_meta <- desc_to_meta(desc_path) - parsed <- cff_read_citation(cit_path, my_meta) - expect_s3_class(parsed, c("cff_ref_list", "cff"), exact = TRUE) + a_cff <- cff_read_citation(cit_path, my_meta) + expect_s3_class(a_cff, c("cff_ref_list", "cff"), exact = TRUE) }) test_that("cff_read CITATION_auto", { @@ -128,9 +128,9 @@ test_that("cff_read CITATION_auto", { cit_path <- system.file("examples/CITATION_auto", package = "cffr") my_meta <- desc_to_meta(desc_path) - parsed <- cff_read(cit_path, my_meta) + a_cff <- cff_read(cit_path, my_meta) - expect_equal(length(parsed), 3) + expect_equal(length(a_cff), 3) }) test_that("cff_read CITATION_rmarkdown", { @@ -138,48 +138,48 @@ test_that("cff_read CITATION_rmarkdown", { cit_path <- system.file("examples/CITATION_rmarkdown", package = "cffr") my_meta <- desc_to_meta(desc_path) - parsed <- cff_read(cit_path, my_meta) + a_cff <- cff_read(cit_path, my_meta) - expect_equal(length(parsed), 3) + expect_equal(length(a_cff), 3) }) test_that("cff_read_safe CITATION_basic", { desc_path <- system.file("examples/DESCRIPTION_basic", package = "cffr") cit_path <- 
system.file("examples/CITATION_basic", package = "cffr") - parsed <- cff_safe_read_citation(desc_path, cit_path) + a_cff <- cff_safe_read_citation(desc_path, cit_path) - expect_s3_class(parsed, c("cff_ref_list", "cff"), exact = TRUE) - expect_equal(length(parsed), 2) + expect_s3_class(a_cff, c("cff_ref_list", "cff"), exact = TRUE) + expect_equal(length(a_cff), 2) }) test_that("cff_read_safe CITATION with no encoding", { desc_path <- system.file("examples/DESCRIPTION_no_encoding", package = "cffr") cit_path <- system.file("examples/CITATION_basic", package = "cffr") - parsed <- cff_safe_read_citation(desc_path, cit_path) + a_cff <- cff_safe_read_citation(desc_path, cit_path) - expect_s3_class(parsed, c("cff_ref_list", "cff"), exact = TRUE) - expect_equal(length(parsed), 2) + expect_s3_class(a_cff, c("cff_ref_list", "cff"), exact = TRUE) + expect_equal(length(a_cff), 2) }) test_that("cff_read_safe CITATION_auto", { # Needs an installed package desc_path <- system.file("examples/DESCRIPTION_rgeos", package = "cffr") cit_path <- system.file("examples/CITATION_auto", package = "cffr") - parsed <- cff_safe_read_citation(desc_path, cit_path) + a_cff <- cff_safe_read_citation(desc_path, cit_path) - expect_s3_class(parsed, c("cff_ref_list", "cff"), exact = TRUE) - expect_equal(length(parsed), 3) + expect_s3_class(a_cff, c("cff_ref_list", "cff"), exact = TRUE) + expect_equal(length(a_cff), 3) }) test_that("cff_read_safe CITATION_rmarkdown", { desc_path <- system.file("examples/DESCRIPTION_basic", package = "cffr") cit_path <- system.file("examples/CITATION_rmarkdown", package = "cffr") - parsed <- cff_safe_read_citation(desc_path, cit_path) + a_cff <- cff_safe_read_citation(desc_path, cit_path) - expect_s3_class(parsed, c("cff_ref_list", "cff"), exact = TRUE) - expect_equal(length(parsed), 3) + expect_s3_class(a_cff, c("cff_ref_list", "cff"), exact = TRUE) + expect_equal(length(a_cff), 3) }) @@ -192,3 +192,20 @@ test_that("cff_read_safe CITATION NULL", { cit_path )) }) + +test_that("Corrupt CITATION", { + tmp <- tempfile("CITATION") + writeLines("I am a bad CITATION", tmp) + expect_message( + expect_message(anull <- cff_read(tmp), "It was not possible to read"), + "Can't" + ) + expect_null(anull) + + + # Internal + desc_path <- system.file("x", package = "cffr") + expect_silent(anull <- cff_safe_read_citation(desc_path = desc_path, tmp)) + + expect_null(anull) +}) diff --git a/tests/testthat/test-cff_validate.R b/tests/testthat/test-cff_validate.R index d9923520..8a428ecc 100644 --- a/tests/testthat/test-cff_validate.R +++ b/tests/testthat/test-cff_validate.R @@ -35,12 +35,6 @@ test_that("Validate error CITATION.cff", { expect_snapshot(df) expect_silent(cff_validate(err, verbose = FALSE)) - - # From path - expect_message( - cff_validate(err), - err - ) }) test_that("Validate cffr objects from installed packages", { diff --git a/tests/testthat/test-cff_write.R b/tests/testthat/test-cff_write.R index f27746cd..8e93fa3d 100644 --- a/tests/testthat/test-cff_write.R +++ b/tests/testthat/test-cff_write.R @@ -6,13 +6,13 @@ test_that("Write basic", { tmp <- tempfile(fileext = ".cff") expect_message(cff_write(desc_file, outfile = tmp, validate = FALSE)) expect_silent(cff_write(desc_file, outfile = tmp, verbose = FALSE)) - expect_true(file.exists(tmp)) + expect_true(file_exist_abort(tmp)) # Validate from file expect_true(cff_validate(tmp, verbose = FALSE)) file.remove(tmp) - expect_false(file.exists(tmp)) + expect_false(file_exist_abort(tmp)) }) test_that("Write to a non-existing folder", { @@ -31,13 +31,13 
@@ test_that("Write to a non-existing folder", { expect_true(dir.exists(file.path( tempdir(), "test_new_folder", "recursive" ))) - expect_true(file.exists(tmp)) + expect_true(file_exist_abort(tmp)) # Validate from file expect_true(cff_validate(tmp, verbose = FALSE)) file.remove(tmp) - expect_false(file.exists(tmp)) + expect_false(file_exist_abort(tmp)) }) test_that("Write no encoding", { @@ -53,13 +53,13 @@ test_that("Write no encoding", { verbose = FALSE ) - expect_true(file.exists(tmp)) + expect_true(file_exist_abort(tmp)) # Validate from file expect_true(cff_validate(tmp, verbose = FALSE)) file.remove(tmp) - expect_false(file.exists(tmp)) + expect_false(file_exist_abort(tmp)) }) test_that("Add new keys", { @@ -92,13 +92,13 @@ test_that("Add new keys", { ) expect_snapshot(s) - expect_true(file.exists(tmp)) + expect_true(file_exist_abort(tmp)) # Validate from file expect_true(cff_validate(tmp, verbose = FALSE)) file.remove(tmp) - expect_false(file.exists(tmp)) + expect_false(file_exist_abort(tmp)) }) @@ -139,7 +139,7 @@ test_that("Append keys", { expect_true(cff_validate(tmp, verbose = FALSE)) file.remove(tmp) - expect_false(file.exists(tmp)) + expect_false(file_exist_abort(tmp)) }) test_that("Fix extension of the file", { @@ -148,10 +148,10 @@ test_that("Fix extension of the file", { tmp <- tempfile() expect_silent(cff_write(cffobj, tmp, verbose = FALSE)) - expect_false(file.exists(tmp)) - expect_true(file.exists(paste0(tmp, ".cff"))) + expect_false(file_exist_abort(tmp)) + expect_true(file_exist_abort(paste0(tmp, ".cff"))) expect_true(cff_validate(paste0(tmp, ".cff"), verbose = FALSE)) file.remove(paste0(tmp, ".cff")) - expect_false(file.exists(paste0(tmp, ".cff"))) + expect_false(file_exist_abort(paste0(tmp, ".cff"))) }) diff --git a/tests/testthat/test-cff_write_misc.R b/tests/testthat/test-cff_write_misc.R index 53da829f..d5ce82b7 100644 --- a/tests/testthat/test-cff_write_misc.R +++ b/tests/testthat/test-cff_write_misc.R @@ -13,16 +13,16 @@ test_that("Write", { # Fix extensions file <- paste0(file, ".bib") - expect_true(file.exists(file)) + expect_true(file_exist_abort(file)) expect_snapshot_file(file) # Check backup - expect_false(file.exists(paste0(file, ".bk1"))) + expect_false(file_exist_abort(paste0(file, ".bk1"))) # Check now backup exists cff_write_bib(bib, file, append = TRUE) - expect_true(file.exists(paste0(file, ".bk1"))) + expect_true(file_exist_abort(paste0(file, ".bk1"))) file.remove(file) file.remove(paste0(file, ".bk1")) @@ -99,7 +99,7 @@ test_that("Test dir creation", { expect_silent(cff_write_bib(bib, file, verbose = FALSE)) expect_true(dir.exists(dir)) - expect_true(file.exists(file)) + expect_true(file_exist_abort(file)) unlink(dir, recursive = TRUE, force = TRUE) @@ -110,7 +110,7 @@ test_that("Test dir creation", { expect_message(cff_write_bib(bib, file, verbose = TRUE), "Creating directory") expect_true(dir.exists(dir)) - expect_true(file.exists(file)) + expect_true(file_exist_abort(file)) unlink(dir, recursive = TRUE, force = TRUE) }) @@ -133,7 +133,7 @@ test_that("Write CITATION", { expect_message(cff_write_citation(bib, file, verbose = TRUE)) # Check backup - expect_false(file.exists(paste0(file, ".bk1"))) + expect_false(file_exist_abort(paste0(file, ".bk1"))) # Check now backup exists and use cff expect_silent(cff_write_citation(f1, file, diff --git a/tests/testthat/test-merge_desc_cit.R b/tests/testthat/test-merge_desc_cit.R index 545c5df7..44040d83 100644 --- a/tests/testthat/test-merge_desc_cit.R +++ b/tests/testthat/test-merge_desc_cit.R @@ -8,9 +8,9 @@ 
test_that("Merge all DESCRIPTION files with CITATION_basic", { package = "cffr" ) for (i in seq_len(length(allfiles))) { - desc_parse <- cff_read_description(allfiles[i], gh_keywords = FALSE) + desc_cff <- cff_read_description(allfiles[i], gh_keywords = FALSE) generate_cit <- cff_safe_read_citation(allfiles[i], citpath) - merged <- merge_desc_cit(desc_parse, generate_cit) + merged <- merge_desc_cit(desc_cff, generate_cit) merged <- as_cff(merged) expect_snapshot(merged) diff --git a/tests/testthat/test-methods.R b/tests/testthat/test-methods.R index 3de6a711..d7cce0e2 100644 --- a/tests/testthat/test-methods.R +++ b/tests/testthat/test-methods.R @@ -168,6 +168,24 @@ test_that("as.person method", { expect_snapshot( format(aut2, include = c("given", "family", "email", "role", "comment")) ) + + # Malformed + malf <- getref$authors + malf[[1]] <- list(a = "list") + expect_s3_class(malf, "cff_pers_list") + + expect_snapshot(end <- as.person(malf)) + expect_s3_class(end, "person") + expect_length(end, 1) + + # Duplicates + aa <- getref$authors + aa[[3]] <- aa[[1]] + expect_s3_class(aa, "cff_pers_list") + + expect_snapshot(aa2 <- as.person(aa)) + expect_s3_class(aa2, "person") + expect_length(aa2, 2) }) test_that("Errors on other as.person methods", { diff --git a/tests/testthat/test-mock-package.R b/tests/testthat/test-mock-package.R index b859fa5c..25c023f4 100644 --- a/tests/testthat/test-mock-package.R +++ b/tests/testthat/test-mock-package.R @@ -28,11 +28,11 @@ test_that("Test in mock package", { expect_silent(cff_write_citation(cit, "./inst/CITATION", verbose = FALSE)) - expect_true(file.exists("./inst/CITATION")) + expect_true(file_exist_abort("./inst/CITATION")) # Create Rbuildignore file.create(".Rbuildignore", showWarnings = FALSE) - expect_true(file.exists(".Rbuildignore")) + expect_true(file_exist_abort(".Rbuildignore")) # Add action expect_message( @@ -46,7 +46,7 @@ test_that("Test in mock package", { expect_message(cff_gha_update(), "already installed") expect_message(cff_gha_update(overwrite = TRUE), "Installing") - expect_true(file.exists(file.path( + expect_true(file_exist_abort(file.path( ".github", "workflows", "update-citation-cff.yaml" @@ -58,7 +58,7 @@ test_that("Test in mock package", { expect_output(cff_write()) - expect_true(file.exists("CITATION.cff")) + expect_true(file_exist_abort("CITATION.cff")) expect_true(cff_validate("CITATION.cff", verbose = FALSE)) @@ -79,4 +79,6 @@ test_that("Test in mock package", { expect_snapshot(cffobj) expect_snapshot(toBibtex(cit)) expect_snapshot(toBibtex(a_bib)) + + unlink(new_dir, recursive = TRUE, force = TRUE) }) diff --git a/tests/testthat/test-utils-alerts.R b/tests/testthat/test-utils-alerts.R new file mode 100644 index 00000000..d97c2927 --- /dev/null +++ b/tests/testthat/test-utils-alerts.R @@ -0,0 +1,57 @@ +test_that("Check abort_if_not_cff", { + obj <- cff() + + expect_silent(abort_if_not_cff(obj)) + + file <- system.file("examples/CITATION_skeleton.cff", + package = "cffr" + ) + + expect_silent(abort_if_not_cff(file)) + + # Error + + err <- "Some string" + + expect_snapshot(abort_if_not_cff(err), error = TRUE) + + # Other objects + x <- data.frame(a = 1) + expect_snapshot(abort_if_not_cff(x), error = TRUE) +}) + +test_that("Check file_exist_abort", { + nonexits <- "aaaaaaaa" + expect_silent(file_exist_abort(nonexits)) + expect_false(file_exist_abort(nonexits)) + + # Signal an error + expect_snapshot(file_exist_abort(nonexits, abort = TRUE), error = TRUE) + # With dir + non_with_path <- "./R/no/existe" + 
expect_snapshot(file_exist_abort(non_with_path, abort = TRUE), error = TRUE) + + + tmp <- tempfile() + + writeLines("a", tmp) + expect_true(file_exist_abort(tmp)) + expect_true(file_exist_abort(tmp, abort = TRUE)) + + unlink(tmp) +}) + +test_that("Check match_cff_arg", { + x <- "aaaaaaaa" + expect_snapshot(match_cff_arg("a", "b", "..."), error = TRUE) + expect_snapshot(match_cff_arg("a", c("b", "c", "d"), "what"), error = TRUE) + + # Test call + my_fun <- function(x = "a") { + match_cff_arg("a", LETTERS[1:5], "aa", environment()) + } + + expect_snapshot(my_fun(), error = TRUE) + expect_silent(a <- match_cff_arg(c("a", "b"), letters, "...")) + expect_identical(a, "a") +}) diff --git a/tests/testthat/test_ci/test-full_cff.R b/tests/testthat/test_ci/test-full_cff.R index 2da3ad32..981d78fa 100644 --- a/tests/testthat/test_ci/test-full_cff.R +++ b/tests/testthat/test_ci/test-full_cff.R @@ -38,7 +38,7 @@ # cit_path <- file.path(find.package(installed[i, ]$Package), "CITATION") # # -# if (file.exists(cit_path)) { +# if (file_exist_abort(cit_path)) { # withcit <- c(withcit, TRUE) # } else { # withcit <- c(withcit, FALSE) diff --git a/tests/testthat/test_ci/test-new.R b/tests/testthat/test_ci/test-new.R index 5ea2c93e..c29af292 100644 --- a/tests/testthat/test_ci/test-new.R +++ b/tests/testthat/test_ci/test-new.R @@ -2,7 +2,6 @@ library(cffr) installed <- as.data.frame(installed.packages()[, c("Package", "Version")]) installed <- installed[order(installed$Package), ] - rownames(installed) <- seq_len(nrow(installed)) l <- nrow(installed) diff --git a/vignettes/bibtex_cff.Rmd b/vignettes/bibtex_cff.Rmd index 74b012cb..7567509f 100644 --- a/vignettes/bibtex_cff.Rmd +++ b/vignettes/bibtex_cff.Rmd @@ -120,7 +120,7 @@ entry <- bibentry("book", toBibtex(entry) ``` -The final results of the entry as a text string would be parsed as[^1]: +The final results of the entry as a text string would be coerced as[^1]: [^1]: By default **R** Pandoc would generate the cite on the Chicago author-date format [@rmarkdowncookbook2020] @@ -490,7 +490,7 @@ We provide more detail on some of the mappings presented in the table above: **year** fields. - When **pages** is provided as a range separated by `--`, i.e, **pages = - {3--5}** would be parsed as [start: 3]{.underline}, [end: 5]{.underline} in + {3--5}** would be coerced as [start: 3]{.underline}, [end: 5]{.underline} in [CFF]{.underline}. #### BibLaTeX @@ -952,8 +952,8 @@ knitr::kable(df_table, **BibTeX entry** -Note that **month** can't be parsed to a single integer in the range `1--12` as -required on CFF, so it is not parsed to avoid validation errors. +Note that **month** can't be coerce to a single integer in the range `1--12` as +required on CFF, so it is ignored to avoid validation errors. ``` bibtex @manual{manual-full, diff --git a/vignettes/cffr.Rmd b/vignettes/cffr.Rmd index bbed3bd7..a7779d6c 100644 --- a/vignettes/cffr.Rmd +++ b/vignettes/cffr.Rmd @@ -82,7 +82,8 @@ Congratulations! Now you have a full `CITATION.cff` file for your **R** package. ## Modifying your `CITATION.cff` file You can easily customize the `cff` object (a custom class of **cffr**) using the -parsers provided in the package, as well as making use of the `keys` parameter. +coercion system provided in the package, as well as making use of the `keys` +parameter. We would create a `cff` object using `cff()` (for example purposes only) and we would add or modify contents of it. 
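A minimal sketch of the `pages`/`month` mapping described in the `bibtex_cff` vignette hunk above, assuming `as_cff()` and `as_bibentry()` as exported by cffr; the commented values indicate the expected mapping rather than captured output:

```r
# Sketch: round trip of a page range and a month through the cff coercers.
library(cffr)

bib <- bibentry("Article",
  title   = "An illustrative article",
  author  = "Jane Doe",
  journal = "A Journal",
  year    = "2020",
  month   = "8",
  pages   = "3--5"
)

ref <- as_cff(bib)[[1]]

# pages = "3--5" should land in two CFF keys:
ref$start # expected: "3"
ref$end   # expected: "5"

# A numeric month should be kept as a single integer in 1--12 (here, 8);
# strings that cannot be reduced to one integer are dropped instead.
ref$month

# And back: as_bibentry() should rebuild pages as "3--5" and month as "aug".
toBibtex(as_bibentry(ref))
```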
@@ -131,7 +132,7 @@ schema](https://github.com/citation-file-format/citation-file-format/blob/main/s Following the previous example, we are going to add a new author first. For doing that, we need first to extract the current author of the package and -append the parsed person: +append the coerced person: ```{r includeauthor} # Valid person keys @@ -154,13 +155,13 @@ chiquito <- person("Gregorio", chiquito # To cff -chiquito_parsed <- as_cff_person(chiquito) -chiquito_parsed +chiquito_cff <- as_cff_person(chiquito) +chiquito_cff # Append to previous authors -newauthors <- c(modobject$authors, chiquito_parsed) +newauthors <- c(modobject$authors, chiquito_cff) newauthors newauthorobject <- cff_modify(modobject, authors = newauthors) @@ -179,7 +180,7 @@ would add two references, one created with `bibentry()` and another with cff_schema_definitions_refs() -# Auto parsed from another R package +# Auto coercion from another R package base_r <- citation("base") bib <- bibentry("Book", diff --git a/vignettes/crosswalk.Rmd b/vignettes/crosswalk.Rmd index cf0bdbd8..f9bf6a83 100644 --- a/vignettes/crosswalk.Rmd +++ b/vignettes/crosswalk.Rmd @@ -32,7 +32,7 @@ Format schema version ## Summary {#summary} -We summarize here the fields that **cffr** can parse and the original source of +We summarize here the fields that **cffr** can coerce and the original source of information for each one of them. The details on each key are presented on the next section of the document. The assessment of fields are based on the [Guide to Citation File Format schema version @@ -68,7 +68,7 @@ origin[keys %in% c( )] <- "CITATION file" -origin[origin == FALSE] <- "Not parsed by cffr" +origin[origin == FALSE] <- "Ignored by cffr" df <- data.frame( key = paste0("", keys, ""), @@ -111,7 +111,7 @@ cat(pkg$get("Description")) ### authors -This key is parsed from the `"Authors"` or `"Authors@R"` field of the +This key is coerced from the `"Authors"` or `"Authors@R"` field of the `DESCRIPTION` file. By default persons with the role `"aut"` or `"cre"` are considered, however this can be modified via the `authors_roles` parameter. @@ -182,7 +182,7 @@ v1.2.0](https://github.com/citation-file-format/citation-file-format/blob/main/s ### contact -This key is parsed from the `"Authors"` or `"Authors@R"` field of the +This key is coerced from the `"Authors"` or `"Authors@R"` field of the `DESCRIPTION` file. Only persons with the role `"cre"` (i.e, the maintainer(s)) are considered. @@ -260,7 +260,7 @@ cat(cff_create(tmp)$`date-released`) ### doi {#doi} -This key is parsed from the `"doi"` field of the +This key is coerced from the `"doi"` field of the [preferred-citation](#preferred-citation) object.
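To illustrate the `authors`/`contact` coercion summarized in the crosswalk hunks above, a small sketch using one of the example `DESCRIPTION` files shipped with the package; `authors_roles` is the argument referenced in that text, and the extra role `"ctb"` is only an assumption for the example:

```r
# Sketch: which DESCRIPTION persons end up in `authors` and `contact`.
library(cffr)

desc_file <- system.file("examples/DESCRIPTION_basic", package = "cffr")

# Defaults: roles "aut" and "cre" populate `authors`; only "cre"
# (the maintainer) populates `contact`.
a_cff <- cff_create(desc_file, keys = list(references = NULL))
a_cff$authors
a_cff$contact

# Widening the selection (assumed extra role for illustration): persons
# with role "ctb" would also be coerced into `authors`, if the file has any.
a_cff_ctb <- cff_read_description(desc_file,
  authors_roles = c("aut", "cre", "ctb")
)
a_cff_ctb$authors
```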
From 8a65777dfc1e041f65fba69a9d29907248037a89 Mon Sep 17 00:00:00 2001 From: Diego H Date: Thu, 7 Mar 2024 15:28:52 +0100 Subject: [PATCH 12/13] More checks --- .github/workflows/check-full.yaml | 1 + R/utils-methods.R | 10 +++++---- tests/testthat/_snaps/as_bibentry.md | 24 --------------------- tests/testthat/_snaps/methods.md | 23 ++++++++++++++------ tests/testthat/test-as_bibentry.R | 32 ++-------------------------- tests/testthat/test-methods.R | 12 +++++++++++ 6 files changed, 38 insertions(+), 64 deletions(-) diff --git a/.github/workflows/check-full.yaml b/.github/workflows/check-full.yaml index a1077e05..203bf3f0 100644 --- a/.github/workflows/check-full.yaml +++ b/.github/workflows/check-full.yaml @@ -35,6 +35,7 @@ jobs: - {os: ubuntu-latest, r: 'devel', http-user-agent: 'release'} - {os: ubuntu-latest, r: 'oldrel'} - {os: ubuntu-latest, r: 'oldrel-2'} + - {os: ubuntu-latest, r: '4.0'} env: GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} diff --git a/R/utils-methods.R b/R/utils-methods.R index 9cf9a4a8..e0cdf19e 100644 --- a/R/utils-methods.R +++ b/R/utils-methods.R @@ -20,10 +20,12 @@ make_r_person <- function(x) { family <- NULL } else { given <- clean_str(x[["given-names"]]) - family <- clean_str(paste( - clean_str(x[["name-particle"]]), clean_str(x[["family-names"]]), - clean_str(x[["name-suffix"]]) - )) + family <- paste(clean_str(x[["name-particle"]]), + clean_str(x[["family-names"]])) + # Add suffix with comma + family <- paste0(c(family, clean_str(x[["name-suffix"]])), collapse = ", ") + family <- clean_str(family) + } role <- clean_str(x$role) diff --git a/tests/testthat/_snaps/as_bibentry.md b/tests/testthat/_snaps/as_bibentry.md index 5c8bdf59..9730bcb8 100644 --- a/tests/testthat/_snaps/as_bibentry.md +++ b/tests/testthat/_snaps/as_bibentry.md @@ -28,30 +28,6 @@ Error in `as_bibentry()`: ! Don't know how to extract a from "invented_package". If it is a package run `install.packages("invented_package")` first. ---- - - Code - as_bibentry(f) - Output - Basic M (????). "basicdesc: A Basic Description." - . - ---- - - Code - as_bibentry("invented_package") - Condition - Error in `as_bibentry()`: - ! Don't know how to extract a from "invented_package". If it is a package run `install.packages("invented_package")` first. - ---- - - Code - as_bibentry(f) - Output - Basic M (????). "basicdesc: A Basic Description." - . 
- # as_bibentry cff Code diff --git a/tests/testthat/_snaps/methods.md b/tests/testthat/_snaps/methods.md index ed342e13..a09abb62 100644 --- a/tests/testthat/_snaps/methods.md +++ b/tests/testthat/_snaps/methods.md @@ -879,7 +879,7 @@ Code dput(aut) Output - structure(list(list(given = "One Truly", family = "van der Real Person IV", + structure(list(list(given = "One Truly", family = "van der Real Person, IV", role = NULL, email = "project@entity.com", comment = c(alias = "Citey", affiliation = "Excellent University, Niceplace, Arcadia", address = "22 Acacia Avenue", city = "Citationburgh", region = "Renfrewshire", @@ -892,14 +892,14 @@ Code format(aut, include = c("given", "family", "email", "role", "comment")) Output - [1] "One Truly van der Real Person IV (Citey, Excellent University, Niceplace, Arcadia, 22 Acacia Avenue, Citationburgh, Renfrewshire, C13 7X7, GB, , +44(0)141-323 4567, +44(0)141-323 45678, https://www.entity-project-team.io)" + [1] "One Truly van der Real Person, IV (Citey, Excellent University, Niceplace, Arcadia, 22 Acacia Avenue, Citationburgh, Renfrewshire, C13 7X7, GB, , +44(0)141-323 4567, +44(0)141-323 45678, https://www.entity-project-team.io)" --- Code dput(aut2) Output - structure(list(list(given = "One Truly", family = "van der Real Person IV", + structure(list(list(given = "One Truly", family = "van der Real Person, IV", role = NULL, email = "project@entity.com", comment = c(alias = "Citey", affiliation = "Excellent University, Niceplace, Arcadia", address = "22 Acacia Avenue", city = "Citationburgh", region = "Renfrewshire", @@ -919,8 +919,8 @@ Code format(aut2, include = c("given", "family", "email", "role", "comment")) Output - [1] "One Truly van der Real Person IV (Citey, Excellent University, Niceplace, Arcadia, 22 Acacia Avenue, Citationburgh, Renfrewshire, C13 7X7, GB, , +44(0)141-323 4567, +44(0)141-323 45678, https://www.entity-project-team.io)" - [2] "Entity Project Team Conference entity (22 Acacia Avenue, Citationburgh, Renfrewshire, C13 7X7, GB, , +44(0)141-323 4567, +44(0)141-323 45678, https://www.entity-project-team.io, 2017-01-01, 2017-01-31, The team garage)" + [1] "One Truly van der Real Person, IV (Citey, Excellent University, Niceplace, Arcadia, 22 Acacia Avenue, Citationburgh, Renfrewshire, C13 7X7, GB, , +44(0)141-323 4567, +44(0)141-323 45678, https://www.entity-project-team.io)" + [2] "Entity Project Team Conference entity (22 Acacia Avenue, Citationburgh, Renfrewshire, C13 7X7, GB, , +44(0)141-323 4567, +44(0)141-323 45678, https://www.entity-project-team.io, 2017-01-01, 2017-01-31, The team garage)" --- @@ -936,6 +936,17 @@ Message i Removing duplicate objects. +# as.person method names and particles + + Code + cf + Output + - family-names: Wicksteed + given-names: P. H. 
+ name-particle: von + name-suffix: III + - name: The translator factory + # Errors on other as.person methods Code @@ -1098,7 +1109,7 @@ Code toBibtex(sev_auth) Output - [1] "{The Big Bopper} and Sinatra, Frank and Martin, Dean and Davis Jr., Sammy" + [1] "{The Big Bopper} and Sinatra, Frank and Martin, Dean and Davis, Jr., Sammy" --- diff --git a/tests/testthat/test-as_bibentry.R b/tests/testthat/test-as_bibentry.R index 2ef70291..40a9a94b 100644 --- a/tests/testthat/test-as_bibentry.R +++ b/tests/testthat/test-as_bibentry.R @@ -122,36 +122,8 @@ test_that("as_bibentry character", { # If another kind of file f <- system.file("examples/DESCRIPTION_basic", package = "cffr") - - expect_snapshot(as_bibentry(f)) -}) - -test_that("as_bibentry character", { - skip_if_not_installed("rmarkdown") - - base <- as_bibentry("rmarkdown") - - expect_s3_class(base, "bibentry") - - expect_length(base, 1) - - refs <- as_bibentry("rmarkdown", what = "references") - expect_s3_class(refs, "bibentry") - - expect_gte(length(refs), 1) - - all <- as_bibentry("rmarkdown", what = "all") - expect_s3_class(all, "bibentry") - - expect_length(all, length(base) + length(refs)) - - # If an invented package - expect_snapshot(as_bibentry("invented_package"), error = TRUE) - - # If another kind of file - f <- system.file("examples/DESCRIPTION_basic", package = "cffr") - - expect_snapshot(as_bibentry(f)) + s <- as_bibentry(f) + expect_s3_class(s, "bibentry") }) test_that("as_bibentry cff", { diff --git a/tests/testthat/test-methods.R b/tests/testthat/test-methods.R index d7cce0e2..aea7b623 100644 --- a/tests/testthat/test-methods.R +++ b/tests/testthat/test-methods.R @@ -188,6 +188,18 @@ test_that("as.person method", { expect_length(aa2, 2) }) +test_that("as.person method names and particles", { + str <- "von Wicksteed, III, P. H. 
and {The translator factory}" + + cf <- as_cff_person(str) + expect_snapshot(cf) + + pers_bib <- toBibtex(as.person(cf)) + again <- as_cff_person(pers_bib) + expect_identical(cf, again) + +}) + test_that("Errors on other as.person methods", { path <- system.file("examples/CITATION_complete.cff", package = "cffr") the_cff <- cff_read(path) From 3f7182a2b69580b3c30f2d34d53f3e362ab8fe06 Mon Sep 17 00:00:00 2001 From: Diego H Date: Thu, 7 Mar 2024 19:45:46 +0100 Subject: [PATCH 13/13] Finish refactor --- NAMESPACE | 4 + R/cff_create.R | 2 +- R/cff_read.R | 26 +- R/methods.R | 140 ++++++++-- ...ls-read-description.R => utils-cff_read.R} | 34 +-- R/utils-create.R | 12 +- R/utils-methods.R | 128 +-------- README.md | 2 +- codemeta.json | 2 +- man/cff_class.Rd | 2 +- tests/testthat/_snaps/cff_read.md | 91 +++++++ tests/testthat/_snaps/encoding.md | 91 ------- tests/testthat/_snaps/methods.md | 244 +++++++++--------- tests/testthat/_snaps/parse_dependencies.md | 41 --- tests/testthat/_snaps/utils-cff_ref.md | 9 + .../{merge_desc_cit.md => utils-create.md} | 41 +++ tests/testthat/test-cff_read.R | 40 ++- tests/testthat/test-encoding.R | 24 -- tests/testthat/test-methods.R | 1 - tests/testthat/test-parse_dependencies.R | 13 - tests/testthat/test-utils-bib.R | 8 + ...desc_license.R => test-utils-cff_create.R} | 0 tests/testthat/test-utils-cff_ref.R | 8 + ...t-merge_desc_cit.R => test-utils-create.R} | 14 + 24 files changed, 500 insertions(+), 477 deletions(-) rename R/{utils-read-description.R => utils-cff_read.R} (90%) delete mode 100644 tests/testthat/_snaps/encoding.md delete mode 100644 tests/testthat/_snaps/parse_dependencies.md create mode 100644 tests/testthat/_snaps/utils-cff_ref.md rename tests/testthat/_snaps/{merge_desc_cit.md => utils-create.md} (95%) delete mode 100644 tests/testthat/test-encoding.R delete mode 100644 tests/testthat/test-parse_dependencies.R create mode 100644 tests/testthat/test-utils-bib.R rename tests/testthat/{test-parse_desc_license.R => test-utils-cff_create.R} (100%) create mode 100644 tests/testthat/test-utils-cff_ref.R rename tests/testthat/{test-merge_desc_cit.R => test-utils-create.R} (64%) diff --git a/NAMESPACE b/NAMESPACE index 42d209b7..dfe89f54 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -3,6 +3,10 @@ S3method("[",cff_pers_list) S3method("[",cff_ref_list) S3method(as.data.frame,cff) +S3method(as.data.frame,cff_pers) +S3method(as.data.frame,cff_pers_list) +S3method(as.data.frame,cff_ref) +S3method(as.data.frame,cff_ref_list) S3method(as.list,cff) S3method(as.person,cff) S3method(as.person,cff_pers) diff --git a/R/cff_create.R b/R/cff_create.R index 8ed8ac00..63092e6d 100644 --- a/R/cff_create.R +++ b/R/cff_create.R @@ -158,7 +158,7 @@ cff_create <- function(x, keys = list(), cff_version = "1.2.0", # Add software dependencies if (dependencies) { instpack <- as.character(installed.packages()[, "Package"]) - deps <- parse_dependencies(desc_path, instpack) + deps <- get_dependencies(desc_path, instpack) cffobjend$references <- unique(c(cffobjend$references, deps)) } diff --git a/R/cff_read.R b/R/cff_read.R index c71aea12..547c631f 100644 --- a/R/cff_read.R +++ b/R/cff_read.R @@ -170,22 +170,22 @@ cff_read_description <- function(path, cff_version = "1.2.0", "cff-version" = cff_version, message = msg, type = "software", - title = parse_desc_title(pkg), - version = parse_desc_version(pkg), - authors = parse_desc_authors(pkg, authors_roles = authors_roles), - abstract = parse_desc_abstract(pkg), - repository = parse_desc_repository(pkg), - "repository-code" = 
parse_desc_urls(pkg)$repo, - url = parse_desc_urls(pkg)$url, - identifiers = parse_desc_urls(pkg)$identifiers, - "date-released" = parse_desc_date_released(pkg), - contact = parse_desc_contacts(pkg), - keywords = parse_desc_keywords(pkg), - license = unlist(parse_desc_license(pkg)) + title = get_desc_title(pkg), + version = get_desc_version(pkg), + authors = get_desc_authors(pkg, authors_roles = authors_roles), + abstract = get_desc_abstract(pkg), + repository = get_desc_repository(pkg), + "repository-code" = get_desc_urls(pkg)$repo, + url = get_desc_urls(pkg)$url, + identifiers = get_desc_urls(pkg)$identifiers, + "date-released" = get_desc_date_released(pkg), + contact = get_desc_contacts(pkg), + keywords = get_desc_keywords(pkg), + license = unlist(get_desc_license(pkg)) ) if (gh_keywords) { - ghtopics <- parse_ghtopics(field_list) + ghtopics <- get_gh_topics(field_list) field_list$keywords <- unique(c(field_list$keywords, ghtopics)) } diff --git a/R/methods.R b/R/methods.R index 3c8613af..7bca56d9 100644 --- a/R/methods.R +++ b/R/methods.R @@ -30,32 +30,132 @@ c.cff <- function(..., recursive = FALSE) { #' @noRd #' @export as.data.frame.cff <- function(x, row.names = NULL, optional = FALSE, ...) { - # List of references - if (inherits(x, "cff_ref_list")) { - x_n <- list("references" = x) - the_df <- cff_to_df(x_n) - } else if (inherits(x, "cff_pers_list")) { - n_l <- seq_len(length(x)) - end_df <- lapply(n_l, function(i) { - df <- as.data.frame(x[[i]]) - nm <- names(df) - names(df) <- paste0("person", ".", sprintf("%02d", i - 1), ".", nm) + # For better dispatching + x <- as_cff(as.list(x)) + + len <- length(x) + key_len <- seq_len(len) + ref_n <- names(x) + + df_l <- lapply(key_len, function(y) { + el <- x[[y]] + nm <- ref_n[y] + nm <- gsub("-", "_", nm) + + if (nm == "preferred_citation") { + return(as.data.frame(el, prefix = nm)) + } + + if (any(inherits(el, "cff_pers"), inherits(el, "cff_pers_list"))) { + return(as.data.frame(el, prefix = nm)) + } + + if (inherits(el, "cff_ref_list")) { + return(as.data.frame(el, prefix = nm)) + } + + if (length(el) > 1) { + ltot <- length(el) + df <- as.data.frame(matrix(el, nrow = 1)) + nm2 <- paste0(nm, ".", sprintf("%02d", seq_len(ltot) - 1)) + + names(df) <- nm2 return(df) - }) + } - the_df <- do.call(cbind, end_df) - } else { - the_df <- cff_to_df(x) - } + df <- as.data.frame(x[y]) + names(df) <- nm + df + }) - the_df <- as.data.frame(the_df, - row.names = row.names, optional = optional, - ... - ) + the_df <- do.call(cbind, df_l) - return(the_df) + as.data.frame(the_df, row.names = row.names, optional = optional, ...) } +#' @export +as.data.frame.cff_pers_list <- function(x, row.names = NULL, optional = FALSE, + ..., prefix = "person") { + # For better dispatching + x <- as_cff(as.list(x)) + + len <- length(x) + key_len <- seq_len(len) + + df_l <- lapply(key_len, function(y) { + prefix <- paste0(prefix, ".", sprintf("%02d", y - 1)) + el <- x[[y]] + df <- as.data.frame(el, prefix = prefix) + df + }) + + the_df <- do.call(cbind, df_l) + + as.data.frame(the_df, row.names = row.names, optional = optional, ...) 
+} + +#' @export +as.data.frame.cff_pers <- function(x, row.names = NULL, optional = FALSE, + ..., prefix = NULL) { + # For better dispatching + x <- as_cff(as.list(x)) + + vals <- unlist(x) + nm <- names(vals) + nm <- gsub("-", "_", nm) + amat <- matrix(vals, nrow = 1, ncol = length(vals)) + m <- as.data.frame(amat) + + if (!is.null(clean_str(prefix))) nm <- paste0(prefix, ".", nm) + + names(m) <- nm + m + + as.data.frame(m, row.names = row.names, optional = optional, ...) +} + + +#' @export +as.data.frame.cff_ref_list <- function(x, row.names = NULL, optional = FALSE, + ..., prefix = "references") { + # For better dispatching + x <- as_cff(as.list(x)) + + len <- length(x) + key_len <- seq_len(len) + + df_l <- lapply(key_len, function(y) { + prefix <- paste0(prefix, ".", sprintf("%02d", y - 1)) + el <- x[[y]] + df <- as.data.frame(el, + row.names = row.names, optional = optional, + ..., prefix = prefix + ) + df + }) + + the_df <- do.call(cbind, df_l) + + as.data.frame(the_df, row.names = row.names, optional = optional, ...) +} + + +#' @export +as.data.frame.cff_ref <- function(x, row.names = NULL, optional = FALSE, + ..., prefix = NULL) { + # For better dispatching + # cff_ref is similar to cff, so we add only cff class + x <- as_cff(as.list(x)) + class(x) <- "cff" + + the_df <- as.data.frame(x) + + if (!is.null(clean_str(prefix))) { + names(the_df) <- paste0(prefix, ".", names(the_df)) + } + + the_df +} # nolint end #' Head diff --git a/R/utils-read-description.R b/R/utils-cff_read.R similarity index 90% rename from R/utils-read-description.R rename to R/utils-cff_read.R index e57ad475..f8d7f5ae 100644 --- a/R/utils-read-description.R +++ b/R/utils-cff_read.R @@ -2,7 +2,7 @@ #' Mapped to Description #' @noRd -parse_desc_abstract <- function(pkg) { +get_desc_abstract <- function(pkg) { abstract <- pkg$get("Description") abstract <- clean_str(abstract) @@ -18,7 +18,7 @@ parse_desc_abstract <- function(pkg) { #' Feeback needed: is this approach correct? 
#' On CRAN, only first aut is used #' @noRd -parse_desc_authors <- function(pkg, authors_roles = c("aut", "cre")) { +get_desc_authors <- function(pkg, authors_roles = c("aut", "cre")) { # This extracts all the persons persons <- as.person(pkg$get_authors()) @@ -26,15 +26,15 @@ parse_desc_authors <- function(pkg, authors_roles = c("aut", "cre")) { any(x$role %in% r) }, logical(1))] - parse_all_authors <- as_cff_person(authors) - parse_all_authors <- unique(parse_all_authors) + get_all_authors <- as_cff_person(authors) + get_all_authors <- unique(get_all_authors) - parse_all_authors + get_all_authors } #' Mapped to Maintainer #' @noRd -parse_desc_contacts <- function(pkg) { +get_desc_contacts <- function(pkg) { persons <- as.person(pkg$get_authors()) # Extract creators only @@ -42,14 +42,14 @@ parse_desc_contacts <- function(pkg) { "cre" %in% x$role }, logical(1))] - parse_all_contacts <- as_cff_person(contact) - parse_all_contacts <- unique(parse_all_contacts) - parse_all_contacts + get_all_contacts <- as_cff_person(contact) + get_all_contacts <- unique(get_all_contacts) + get_all_contacts } #' Mapped to Date, Date/Publication or Packaged #' @noRd -parse_desc_date_released <- function(pkg) { +get_desc_date_released <- function(pkg) { # See https://cran.r-project.org/doc/manuals/R-exts.html#The-DESCRIPTION-file date1 <- pkg$get("Date") # This is for CRAN/BioConductor packages @@ -84,7 +84,7 @@ parse_desc_date_released <- function(pkg) { #' Mapped to X-schema.org-keywords, as codemeta/codemetar #' @noRd -parse_desc_keywords <- function(pkg) { +get_desc_keywords <- function(pkg) { kword <- pkg$get("X-schema.org-keywords") kword <- clean_str(kword) @@ -113,7 +113,7 @@ parse_desc_keywords <- function(pkg) { #' Mapped to License #' @noRd -parse_desc_license <- function(pkg) { +get_desc_license <- function(pkg) { licenses <- pkg$get_field("License") # The schema only accepts two LiCENSES max @@ -144,7 +144,7 @@ parse_desc_license <- function(pkg) { #' Try to get Repository #' @noRd -parse_desc_repository <- function(pkg) { +get_desc_repository <- function(pkg) { name <- pkg$get("Package") repo <- clean_str(pkg$get("Repository")) @@ -178,7 +178,7 @@ parse_desc_repository <- function(pkg) { #' Mapped to Package & Title #' @noRd -parse_desc_title <- function(pkg) { +get_desc_title <- function(pkg) { title <- paste0( pkg$get("Package"), ": ", @@ -192,7 +192,7 @@ parse_desc_title <- function(pkg) { #' Mapped to URL and BugReports #' Additional urls as identifiers #' @noRd -parse_desc_urls <- function(pkg) { +get_desc_urls <- function(pkg) { url <- pkg$get_urls() # Get issue url @@ -269,7 +269,7 @@ parse_desc_urls <- function(pkg) { #' Mapped to Version #' @noRd -parse_desc_version <- function(pkg) { +get_desc_version <- function(pkg) { version <- pkg$get("Version") version <- clean_str(version) @@ -280,7 +280,7 @@ parse_desc_version <- function(pkg) { #' Extract topics as keywords for GH hosted packages #' @noRd -parse_ghtopics <- function(x) { +get_gh_topics <- function(x) { # Only for GitHub repos if (!is_github(x)) { return(NULL) diff --git a/R/utils-create.R b/R/utils-create.R index dfc46c2e..59d9e4ea 100644 --- a/R/utils-create.R +++ b/R/utils-create.R @@ -74,10 +74,10 @@ enhance_pref_authors <- function(cffobjend) { } -parse_dependencies <- function(desc_path, - instpack = as.character( - installed.packages()[, "Package"] - )) { +get_dependencies <- function(desc_path, + instpack = as.character( + installed.packages()[, "Package"] + )) { # nocov start if (!is.character(desc_path)) { return(NULL) 
@@ -145,8 +145,8 @@ parse_dependencies <- function(desc_path, if (file_exist_abort(dfile)) { pkg <- desc::desc(dfile) - mod$url <- parse_desc_urls(pkg)$url - mod$repository <- parse_desc_repository(pkg) + mod$url <- get_desc_urls(pkg)$url + mod$repository <- get_desc_repository(pkg) } mod <- drop_null(mod) diff --git a/R/utils-methods.R b/R/utils-methods.R index e0cdf19e..1230dc42 100644 --- a/R/utils-methods.R +++ b/R/utils-methods.R @@ -20,12 +20,13 @@ make_r_person <- function(x) { family <- NULL } else { given <- clean_str(x[["given-names"]]) - family <- paste(clean_str(x[["name-particle"]]), - clean_str(x[["family-names"]])) + family <- paste( + clean_str(x[["name-particle"]]), + clean_str(x[["family-names"]]) + ) # Add suffix with comma family <- paste0(c(family, clean_str(x[["name-suffix"]])), collapse = ", ") family <- clean_str(family) - } role <- clean_str(x$role) @@ -56,124 +57,3 @@ make_r_person <- function(x) { do.call(person, pers_list) } - - -# Utils for df ---- -unnamed_to_df <- function(key, nm) { - key_l <- as.integer(lengths(key)) - m <- matrix(unlist(key), nrow = 1) - df <- as.data.frame(m) - names(df) <- paste0(nm, ".", sprintf("%02d", seq_len(key_l) - 1)) - return(df) -} - -named_to_df <- function(key, nm) { - key_un <- unlist(key) - - - m <- matrix(as.character(key_un), nrow = 1) - df <- as.data.frame(m) - names(df) <- names(key_un) - return(df) -} - -nested_named_to_df <- function(key, nm) { - key_unlist <- key[[1]] - key_len <- seq_len(length(key_unlist)) - - df_l_type3 <- lapply(key_len, function(z) { - df <- cff_to_df(key_unlist[[z]]) - - # Prepend names - names(df) <- paste0(nm, ".", sprintf("%02d", z - 1), ".", names(df)) - return(df) - }) - - df_list_to_df(df_l_type3) -} - -prefcit_to_df <- function(key, nm = "preferred_citation.") { - key_df <- cff_to_df(key[[1]]) - names(key_df) <- paste0(nm, names(key_df)) - return(key_df) -} - -reflist_to_df <- function(key, nm) { - key_unlist <- key[[1]] - key_len <- seq_len(length(key_unlist)) - - prefix_key <- paste0(nm, ".", sprintf("%02d", key_len - 1), ".") - - df_l <- lapply(key_len, function(y) { - key_l <- key_unlist[y] - nm_pref <- prefix_key[y] - - dff <- prefcit_to_df(key_l, nm_pref) - - dff - }) - - final_df <- df_list_to_df(df_l) - - final_df -} - -df_list_to_df <- function(x) { - # Clean NULL - df_l_clean <- x[!vapply(x, is.null, logical(1))] - - final_df <- do.call(cbind, df_l_clean) - return(final_df) -} -cff_to_df <- function(x) { - # CFF has different models - # type 1: unnamed arrays - unnamed_array <- c("keywords", "languages", "patent-states") - - - # type 2: named arrays - named_array <- c( - "conference", "database-provider", "institution", - "location", "publisher" - ) - - - # type 3: nested named arrays - nested_named_array <- c( - "authors", "contact", "editors", "editors-series", - "recipients", "senders", "translators", "identifiers" - ) - - nms <- names(x) - x_len <- seq_len(length(x)) - - - df_l <- lapply(x_len, function(y) { - nm <- nms[y] - - if (nm %in% unnamed_array) { - return(unnamed_to_df(x[y], nm)) - } - if (nm %in% named_array) { - return(named_to_df(x[y], nm)) - } - if (nm %in% nested_named_array) { - return(nested_named_to_df(x[y], nm)) - } - if (nm == "preferred-citation") { - return(prefcit_to_df(x[y])) - } - - if (nm == "references") { - return(reflist_to_df(x[y], nm)) - } - - the_df <- as.data.frame(x[[y]]) - names(the_df) <- gsub("-", "_", nm) - return(the_df) - }) - - final_df <- df_list_to_df(df_l) - - return(final_df) -} diff --git a/README.md b/README.md index 
27bc9086..3cb2e867 100644 --- a/README.md +++ b/README.md @@ -72,7 +72,7 @@ file and the `CITATION` file (if present) of your package. Note that **cffr** works best if your package pass `R CMD check/devtools::check()`. -As per 2024-03-07 there are at least 290 repos on GitHub using **cffr**. +As per 2024-03-07 there are at least 294 repos on GitHub using **cffr**. [Check them out here](https://github.com/search?q=cffr%20path%3A**%2FCITATION.cff&type=code). diff --git a/codemeta.json b/codemeta.json index 3fe94e44..0b7a2d84 100644 --- a/codemeta.json +++ b/codemeta.json @@ -200,7 +200,7 @@ }, "isPartOf": "https://ropensci.org", "keywords": ["attribution", "citation", "credit", "citation-files", "cff", "metadata", "r", "r-package", "citation-file-format", "rstats", "ropensci", "cran"], - "fileSize": "949.454KB", + "fileSize": "951.322KB", "citation": [ { "@type": "ScholarlyArticle", diff --git a/man/cff_class.Rd b/man/cff_class.Rd index 39968c0e..10895ccf 100644 --- a/man/cff_class.Rd +++ b/man/cff_class.Rd @@ -172,7 +172,7 @@ the_cff$authors #> location: The team garage as.person(the_cff$authors) -#> [1] "One Truly van der Real Person IV (Citey, Excellent University, Niceplace, Arcadia, 22 Acacia Avenue, Citationburgh, Renfrewshire, C13 7X7, GB, , +44(0)141-323 4567, +44(0)141-323 45678, https://www.entity-project-team.io)" +#> [1] "One Truly van der Real Person, IV (Citey, Excellent University, Niceplace, Arcadia, 22 Acacia Avenue, Citationburgh, Renfrewshire, C13 7X7, GB, , +44(0)141-323 4567, +44(0)141-323 45678, https://www.entity-project-team.io)" #> [2] "Entity Project Team Conference entity (22 Acacia Avenue, Citationburgh, Renfrewshire, C13 7X7, GB, , +44(0)141-323 4567, +44(0)141-323 45678, https://www.entity-project-team.io, 2017-01-01, 2017-01-31, The team garage)" }\if{html}{\out{}} } diff --git a/tests/testthat/_snaps/cff_read.md b/tests/testthat/_snaps/cff_read.md index 8e0e2073..afd28fcc 100644 --- a/tests/testthat/_snaps/cff_read.md +++ b/tests/testthat/_snaps/cff_read.md @@ -75,3 +75,94 @@ Message ! `meta` should be "NULL" or a object not a string. Using `meta = NULL` +# Creating cff from packages encoded in latin1 + + Code + cffobj + Output + cff-version: 1.2.0 + message: 'To cite package "surveillance" in publications use:' + type: software + license: GPL-2.0-only + title: 'surveillance: Temporal and Spatio-Temporal Modeling and Monitoring of Epidemic + Phenomena' + version: 1.19.1 + abstract: Statistical methods for the modeling and monitoring of time series of counts, + proportions and categorical data, as well as for the modeling of continuous-time + point processes of epidemic phenomena. The monitoring methods focus on aberration + detection in count data time series from public health surveillance of communicable + diseases, but applications could just as well originate from environmetrics, reliability + engineering, econometrics, or social sciences. The package implements many typical + outbreak detection procedures such as the (improved) Farrington algorithm, or the + negative binomial GLR-CUSUM method of Höhle and Paul (2008) . + A novel CUSUM approach combining logistic and multinomial logistic modeling is also + included. The package contains several real-world data sets, the ability to simulate + outbreak data, and to visualize the results of the monitoring in a temporal, spatial + or spatio-temporal fashion. A recent overview of the available monitoring procedures + is given by Salmon et al. (2016) . 
For the + retrospective analysis of epidemic spread, the package provides three endemic-epidemic + modeling frameworks with tools for visualization, likelihood inference, and simulation. + hhh4() estimates models for (multivariate) count time series following Paul and + Held (2011) and Meyer and Held (2014) . + twinSIR() models the susceptible-infectious-recovered (SIR) event history of a fixed + population, e.g, epidemics across farms or networks, as a multivariate point process + as proposed by Höhle (2009) . twinstim() + estimates self-exciting point process models for a spatio-temporal point pattern + of infective events, e.g., time-stamped geo-referenced surveillance data, as proposed + by Meyer et al. (2012) . A recent + overview of the implemented space-time modeling frameworks for epidemic phenomena + is given by Meyer et al. (2017) . + authors: + - family-names: Höhle + given-names: Michael + email: hoehle@math.su.se + orcid: https://orcid.org/0000-0002-0423-6702 + - family-names: Meyer + given-names: Sebastian + email: seb.meyer@fau.de + orcid: https://orcid.org/0000-0002-1791-9449 + - family-names: Paul + given-names: Michaela + repository: https://CRAN.R-project.org/package=surveillance + url: https://surveillance.R-Forge.R-project.org/ + date-released: '2021-03-30' + contact: + - family-names: Meyer + given-names: Sebastian + email: seb.meyer@fau.de + orcid: https://orcid.org/0000-0002-1791-9449 + references: + - type: article + title: 'Monitoring Count Time Series in R: Aberration Detection in Public Health + Surveillance' + authors: + - family-names: Salmon + given-names: Maëlle + - family-names: Schumacher + given-names: Dirk + - family-names: Höhle + given-names: Michael + journal: Journal of Statistical Software + year: '2016' + volume: '70' + issue: '10' + doi: 10.18637/jss.v070.i10 + start: '1' + end: '35' + - type: article + title: Spatio-Temporal Analysis of Epidemic Phenomena Using the R Package surveillance + authors: + - family-names: Meyer + given-names: Sebastian + - family-names: Held + given-names: Leonhard + - family-names: Höhle + given-names: Michael + journal: Journal of Statistical Software + year: '2017' + volume: '77' + issue: '11' + doi: 10.18637/jss.v077.i11 + start: '1' + end: '55' + diff --git a/tests/testthat/_snaps/encoding.md b/tests/testthat/_snaps/encoding.md deleted file mode 100644 index 2361ac24..00000000 --- a/tests/testthat/_snaps/encoding.md +++ /dev/null @@ -1,91 +0,0 @@ -# Creating cff from packages encoded in latin1 - - Code - cffobj - Output - cff-version: 1.2.0 - message: 'To cite package "surveillance" in publications use:' - type: software - license: GPL-2.0-only - title: 'surveillance: Temporal and Spatio-Temporal Modeling and Monitoring of Epidemic - Phenomena' - version: 1.19.1 - abstract: Statistical methods for the modeling and monitoring of time series of counts, - proportions and categorical data, as well as for the modeling of continuous-time - point processes of epidemic phenomena. The monitoring methods focus on aberration - detection in count data time series from public health surveillance of communicable - diseases, but applications could just as well originate from environmetrics, reliability - engineering, econometrics, or social sciences. The package implements many typical - outbreak detection procedures such as the (improved) Farrington algorithm, or the - negative binomial GLR-CUSUM method of Höhle and Paul (2008) . - A novel CUSUM approach combining logistic and multinomial logistic modeling is also - included. 
The package contains several real-world data sets, the ability to simulate - outbreak data, and to visualize the results of the monitoring in a temporal, spatial - or spatio-temporal fashion. A recent overview of the available monitoring procedures - is given by Salmon et al. (2016) . For the - retrospective analysis of epidemic spread, the package provides three endemic-epidemic - modeling frameworks with tools for visualization, likelihood inference, and simulation. - hhh4() estimates models for (multivariate) count time series following Paul and - Held (2011) and Meyer and Held (2014) . - twinSIR() models the susceptible-infectious-recovered (SIR) event history of a fixed - population, e.g, epidemics across farms or networks, as a multivariate point process - as proposed by Höhle (2009) . twinstim() - estimates self-exciting point process models for a spatio-temporal point pattern - of infective events, e.g., time-stamped geo-referenced surveillance data, as proposed - by Meyer et al. (2012) . A recent - overview of the implemented space-time modeling frameworks for epidemic phenomena - is given by Meyer et al. (2017) . - authors: - - family-names: Höhle - given-names: Michael - email: hoehle@math.su.se - orcid: https://orcid.org/0000-0002-0423-6702 - - family-names: Meyer - given-names: Sebastian - email: seb.meyer@fau.de - orcid: https://orcid.org/0000-0002-1791-9449 - - family-names: Paul - given-names: Michaela - repository: https://CRAN.R-project.org/package=surveillance - url: https://surveillance.R-Forge.R-project.org/ - date-released: '2021-03-30' - contact: - - family-names: Meyer - given-names: Sebastian - email: seb.meyer@fau.de - orcid: https://orcid.org/0000-0002-1791-9449 - references: - - type: article - title: 'Monitoring Count Time Series in R: Aberration Detection in Public Health - Surveillance' - authors: - - family-names: Salmon - given-names: Maëlle - - family-names: Schumacher - given-names: Dirk - - family-names: Höhle - given-names: Michael - journal: Journal of Statistical Software - year: '2016' - volume: '70' - issue: '10' - doi: 10.18637/jss.v070.i10 - start: '1' - end: '35' - - type: article - title: Spatio-Temporal Analysis of Epidemic Phenomena Using the R Package surveillance - authors: - - family-names: Meyer - given-names: Sebastian - - family-names: Held - given-names: Leonhard - - family-names: Höhle - given-names: Michael - journal: Journal of Statistical Software - year: '2017' - volume: '77' - issue: '11' - doi: 10.18637/jss.v077.i11 - start: '1' - end: '55' - diff --git a/tests/testthat/_snaps/methods.md b/tests/testthat/_snaps/methods.md index a09abb62..594b3b76 100644 --- a/tests/testthat/_snaps/methods.md +++ b/tests/testthat/_snaps/methods.md @@ -141,11 +141,11 @@ [136] "preferred_citation.number" [137] "preferred_citation.number_volumes" [138] "preferred_citation.pages" - [139] "preferred_citation.patent-states.00" - [140] "preferred_citation.patent-states.01" - [141] "preferred_citation.patent-states.02" - [142] "preferred_citation.patent-states.03" - [143] "preferred_citation.patent-states.04" + [139] "preferred_citation.patent_states.00" + [140] "preferred_citation.patent_states.01" + [141] "preferred_citation.patent_states.02" + [142] "preferred_citation.patent_states.03" + [143] "preferred_citation.patent_states.04" [144] "preferred_citation.pmcid" [145] "preferred_citation.repository" [146] "preferred_citation.repository_code" @@ -165,15 +165,15 @@ [160] "preferred_citation.conference.address" [161] "preferred_citation.conference.city" 
[162] "preferred_citation.conference.region" - [163] "preferred_citation.conference.post-code" + [163] "preferred_citation.conference.post_code" [164] "preferred_citation.conference.country" [165] "preferred_citation.conference.orcid" [166] "preferred_citation.conference.email" [167] "preferred_citation.conference.tel" [168] "preferred_citation.conference.fax" [169] "preferred_citation.conference.website" - [170] "preferred_citation.conference.date-start" - [171] "preferred_citation.conference.date-end" + [170] "preferred_citation.conference.date_start" + [171] "preferred_citation.conference.date_end" [172] "preferred_citation.conference.location" [173] "preferred_citation.authors.00.family_names" [174] "preferred_citation.authors.00.given_names" @@ -235,20 +235,20 @@ [230] "preferred_citation.contact.01.date_start" [231] "preferred_citation.contact.01.date_end" [232] "preferred_citation.contact.01.location" - [233] "preferred_citation.database-provider.name" - [234] "preferred_citation.database-provider.address" - [235] "preferred_citation.database-provider.city" - [236] "preferred_citation.database-provider.region" - [237] "preferred_citation.database-provider.post-code" - [238] "preferred_citation.database-provider.country" - [239] "preferred_citation.database-provider.orcid" - [240] "preferred_citation.database-provider.email" - [241] "preferred_citation.database-provider.tel" - [242] "preferred_citation.database-provider.fax" - [243] "preferred_citation.database-provider.website" - [244] "preferred_citation.database-provider.date-start" - [245] "preferred_citation.database-provider.date-end" - [246] "preferred_citation.database-provider.location" + [233] "preferred_citation.database_provider.name" + [234] "preferred_citation.database_provider.address" + [235] "preferred_citation.database_provider.city" + [236] "preferred_citation.database_provider.region" + [237] "preferred_citation.database_provider.post_code" + [238] "preferred_citation.database_provider.country" + [239] "preferred_citation.database_provider.orcid" + [240] "preferred_citation.database_provider.email" + [241] "preferred_citation.database_provider.tel" + [242] "preferred_citation.database_provider.fax" + [243] "preferred_citation.database_provider.website" + [244] "preferred_citation.database_provider.date_start" + [245] "preferred_citation.database_provider.date_end" + [246] "preferred_citation.database_provider.location" [247] "preferred_citation.editors.00.family_names" [248] "preferred_citation.editors.00.given_names" [249] "preferred_citation.editors.00.name_particle" @@ -279,77 +279,77 @@ [274] "preferred_citation.editors.01.date_start" [275] "preferred_citation.editors.01.date_end" [276] "preferred_citation.editors.01.location" - [277] "preferred_citation.editors-series.00.family_names" - [278] "preferred_citation.editors-series.00.given_names" - [279] "preferred_citation.editors-series.00.name_particle" - [280] "preferred_citation.editors-series.00.name_suffix" - [281] "preferred_citation.editors-series.00.alias" - [282] "preferred_citation.editors-series.00.affiliation" - [283] "preferred_citation.editors-series.00.address" - [284] "preferred_citation.editors-series.00.city" - [285] "preferred_citation.editors-series.00.region" - [286] "preferred_citation.editors-series.00.post_code" - [287] "preferred_citation.editors-series.00.country" - [288] "preferred_citation.editors-series.00.orcid" - [289] "preferred_citation.editors-series.00.email" - [290] "preferred_citation.editors-series.00.tel" - [291] 
"preferred_citation.editors-series.00.fax" - [292] "preferred_citation.editors-series.00.website" - [293] "preferred_citation.editors-series.01.name" - [294] "preferred_citation.editors-series.01.address" - [295] "preferred_citation.editors-series.01.city" - [296] "preferred_citation.editors-series.01.region" - [297] "preferred_citation.editors-series.01.post_code" - [298] "preferred_citation.editors-series.01.country" - [299] "preferred_citation.editors-series.01.orcid" - [300] "preferred_citation.editors-series.01.email" - [301] "preferred_citation.editors-series.01.tel" - [302] "preferred_citation.editors-series.01.fax" - [303] "preferred_citation.editors-series.01.website" - [304] "preferred_citation.editors-series.01.date_start" - [305] "preferred_citation.editors-series.01.date_end" - [306] "preferred_citation.editors-series.01.location" + [277] "preferred_citation.editors_series.00.family_names" + [278] "preferred_citation.editors_series.00.given_names" + [279] "preferred_citation.editors_series.00.name_particle" + [280] "preferred_citation.editors_series.00.name_suffix" + [281] "preferred_citation.editors_series.00.alias" + [282] "preferred_citation.editors_series.00.affiliation" + [283] "preferred_citation.editors_series.00.address" + [284] "preferred_citation.editors_series.00.city" + [285] "preferred_citation.editors_series.00.region" + [286] "preferred_citation.editors_series.00.post_code" + [287] "preferred_citation.editors_series.00.country" + [288] "preferred_citation.editors_series.00.orcid" + [289] "preferred_citation.editors_series.00.email" + [290] "preferred_citation.editors_series.00.tel" + [291] "preferred_citation.editors_series.00.fax" + [292] "preferred_citation.editors_series.00.website" + [293] "preferred_citation.editors_series.01.name" + [294] "preferred_citation.editors_series.01.address" + [295] "preferred_citation.editors_series.01.city" + [296] "preferred_citation.editors_series.01.region" + [297] "preferred_citation.editors_series.01.post_code" + [298] "preferred_citation.editors_series.01.country" + [299] "preferred_citation.editors_series.01.orcid" + [300] "preferred_citation.editors_series.01.email" + [301] "preferred_citation.editors_series.01.tel" + [302] "preferred_citation.editors_series.01.fax" + [303] "preferred_citation.editors_series.01.website" + [304] "preferred_citation.editors_series.01.date_start" + [305] "preferred_citation.editors_series.01.date_end" + [306] "preferred_citation.editors_series.01.location" [307] "preferred_citation.institution.name" [308] "preferred_citation.institution.address" [309] "preferred_citation.institution.city" [310] "preferred_citation.institution.region" - [311] "preferred_citation.institution.post-code" + [311] "preferred_citation.institution.post_code" [312] "preferred_citation.institution.country" [313] "preferred_citation.institution.orcid" [314] "preferred_citation.institution.email" [315] "preferred_citation.institution.tel" [316] "preferred_citation.institution.fax" [317] "preferred_citation.institution.website" - [318] "preferred_citation.institution.date-start" - [319] "preferred_citation.institution.date-end" + [318] "preferred_citation.institution.date_start" + [319] "preferred_citation.institution.date_end" [320] "preferred_citation.institution.location" [321] "preferred_citation.location.name" [322] "preferred_citation.location.address" [323] "preferred_citation.location.city" [324] "preferred_citation.location.region" - [325] "preferred_citation.location.post-code" + [325] 
"preferred_citation.location.post_code" [326] "preferred_citation.location.country" [327] "preferred_citation.location.orcid" [328] "preferred_citation.location.email" [329] "preferred_citation.location.tel" [330] "preferred_citation.location.fax" [331] "preferred_citation.location.website" - [332] "preferred_citation.location.date-start" - [333] "preferred_citation.location.date-end" + [332] "preferred_citation.location.date_start" + [333] "preferred_citation.location.date_end" [334] "preferred_citation.location.location" [335] "preferred_citation.publisher.name" [336] "preferred_citation.publisher.address" [337] "preferred_citation.publisher.city" [338] "preferred_citation.publisher.region" - [339] "preferred_citation.publisher.post-code" + [339] "preferred_citation.publisher.post_code" [340] "preferred_citation.publisher.country" [341] "preferred_citation.publisher.orcid" [342] "preferred_citation.publisher.email" [343] "preferred_citation.publisher.tel" [344] "preferred_citation.publisher.fax" [345] "preferred_citation.publisher.website" - [346] "preferred_citation.publisher.date-start" - [347] "preferred_citation.publisher.date-end" + [346] "preferred_citation.publisher.date_start" + [347] "preferred_citation.publisher.date_end" [348] "preferred_citation.publisher.location" [349] "preferred_citation.recipients.00.family_names" [350] "preferred_citation.recipients.00.given_names" @@ -492,11 +492,11 @@ [487] "references.00.number" [488] "references.00.number_volumes" [489] "references.00.pages" - [490] "references.00.patent-states.00" - [491] "references.00.patent-states.01" - [492] "references.00.patent-states.02" - [493] "references.00.patent-states.03" - [494] "references.00.patent-states.04" + [490] "references.00.patent_states.00" + [491] "references.00.patent_states.01" + [492] "references.00.patent_states.02" + [493] "references.00.patent_states.03" + [494] "references.00.patent_states.04" [495] "references.00.pmcid" [496] "references.00.repository" [497] "references.00.repository_code" @@ -516,15 +516,15 @@ [511] "references.00.conference.address" [512] "references.00.conference.city" [513] "references.00.conference.region" - [514] "references.00.conference.post-code" + [514] "references.00.conference.post_code" [515] "references.00.conference.country" [516] "references.00.conference.orcid" [517] "references.00.conference.email" [518] "references.00.conference.tel" [519] "references.00.conference.fax" [520] "references.00.conference.website" - [521] "references.00.conference.date-start" - [522] "references.00.conference.date-end" + [521] "references.00.conference.date_start" + [522] "references.00.conference.date_end" [523] "references.00.conference.location" [524] "references.00.authors.00.family_names" [525] "references.00.authors.00.given_names" @@ -586,20 +586,20 @@ [581] "references.00.contact.01.date_start" [582] "references.00.contact.01.date_end" [583] "references.00.contact.01.location" - [584] "references.00.database-provider.name" - [585] "references.00.database-provider.address" - [586] "references.00.database-provider.city" - [587] "references.00.database-provider.region" - [588] "references.00.database-provider.post-code" - [589] "references.00.database-provider.country" - [590] "references.00.database-provider.orcid" - [591] "references.00.database-provider.email" - [592] "references.00.database-provider.tel" - [593] "references.00.database-provider.fax" - [594] "references.00.database-provider.website" - [595] "references.00.database-provider.date-start" - [596] 
"references.00.database-provider.date-end" - [597] "references.00.database-provider.location" + [584] "references.00.database_provider.name" + [585] "references.00.database_provider.address" + [586] "references.00.database_provider.city" + [587] "references.00.database_provider.region" + [588] "references.00.database_provider.post_code" + [589] "references.00.database_provider.country" + [590] "references.00.database_provider.orcid" + [591] "references.00.database_provider.email" + [592] "references.00.database_provider.tel" + [593] "references.00.database_provider.fax" + [594] "references.00.database_provider.website" + [595] "references.00.database_provider.date_start" + [596] "references.00.database_provider.date_end" + [597] "references.00.database_provider.location" [598] "references.00.editors.00.family_names" [599] "references.00.editors.00.given_names" [600] "references.00.editors.00.name_particle" @@ -630,77 +630,77 @@ [625] "references.00.editors.01.date_start" [626] "references.00.editors.01.date_end" [627] "references.00.editors.01.location" - [628] "references.00.editors-series.00.family_names" - [629] "references.00.editors-series.00.given_names" - [630] "references.00.editors-series.00.name_particle" - [631] "references.00.editors-series.00.name_suffix" - [632] "references.00.editors-series.00.alias" - [633] "references.00.editors-series.00.affiliation" - [634] "references.00.editors-series.00.address" - [635] "references.00.editors-series.00.city" - [636] "references.00.editors-series.00.region" - [637] "references.00.editors-series.00.post_code" - [638] "references.00.editors-series.00.country" - [639] "references.00.editors-series.00.orcid" - [640] "references.00.editors-series.00.email" - [641] "references.00.editors-series.00.tel" - [642] "references.00.editors-series.00.fax" - [643] "references.00.editors-series.00.website" - [644] "references.00.editors-series.01.name" - [645] "references.00.editors-series.01.address" - [646] "references.00.editors-series.01.city" - [647] "references.00.editors-series.01.region" - [648] "references.00.editors-series.01.post_code" - [649] "references.00.editors-series.01.country" - [650] "references.00.editors-series.01.orcid" - [651] "references.00.editors-series.01.email" - [652] "references.00.editors-series.01.tel" - [653] "references.00.editors-series.01.fax" - [654] "references.00.editors-series.01.website" - [655] "references.00.editors-series.01.date_start" - [656] "references.00.editors-series.01.date_end" - [657] "references.00.editors-series.01.location" + [628] "references.00.editors_series.00.family_names" + [629] "references.00.editors_series.00.given_names" + [630] "references.00.editors_series.00.name_particle" + [631] "references.00.editors_series.00.name_suffix" + [632] "references.00.editors_series.00.alias" + [633] "references.00.editors_series.00.affiliation" + [634] "references.00.editors_series.00.address" + [635] "references.00.editors_series.00.city" + [636] "references.00.editors_series.00.region" + [637] "references.00.editors_series.00.post_code" + [638] "references.00.editors_series.00.country" + [639] "references.00.editors_series.00.orcid" + [640] "references.00.editors_series.00.email" + [641] "references.00.editors_series.00.tel" + [642] "references.00.editors_series.00.fax" + [643] "references.00.editors_series.00.website" + [644] "references.00.editors_series.01.name" + [645] "references.00.editors_series.01.address" + [646] "references.00.editors_series.01.city" + [647] 
"references.00.editors_series.01.region" + [648] "references.00.editors_series.01.post_code" + [649] "references.00.editors_series.01.country" + [650] "references.00.editors_series.01.orcid" + [651] "references.00.editors_series.01.email" + [652] "references.00.editors_series.01.tel" + [653] "references.00.editors_series.01.fax" + [654] "references.00.editors_series.01.website" + [655] "references.00.editors_series.01.date_start" + [656] "references.00.editors_series.01.date_end" + [657] "references.00.editors_series.01.location" [658] "references.00.institution.name" [659] "references.00.institution.address" [660] "references.00.institution.city" [661] "references.00.institution.region" - [662] "references.00.institution.post-code" + [662] "references.00.institution.post_code" [663] "references.00.institution.country" [664] "references.00.institution.orcid" [665] "references.00.institution.email" [666] "references.00.institution.tel" [667] "references.00.institution.fax" [668] "references.00.institution.website" - [669] "references.00.institution.date-start" - [670] "references.00.institution.date-end" + [669] "references.00.institution.date_start" + [670] "references.00.institution.date_end" [671] "references.00.institution.location" [672] "references.00.location.name" [673] "references.00.location.address" [674] "references.00.location.city" [675] "references.00.location.region" - [676] "references.00.location.post-code" + [676] "references.00.location.post_code" [677] "references.00.location.country" [678] "references.00.location.orcid" [679] "references.00.location.email" [680] "references.00.location.tel" [681] "references.00.location.fax" [682] "references.00.location.website" - [683] "references.00.location.date-start" - [684] "references.00.location.date-end" + [683] "references.00.location.date_start" + [684] "references.00.location.date_end" [685] "references.00.location.location" [686] "references.00.publisher.name" [687] "references.00.publisher.address" [688] "references.00.publisher.city" [689] "references.00.publisher.region" - [690] "references.00.publisher.post-code" + [690] "references.00.publisher.post_code" [691] "references.00.publisher.country" [692] "references.00.publisher.orcid" [693] "references.00.publisher.email" [694] "references.00.publisher.tel" [695] "references.00.publisher.fax" [696] "references.00.publisher.website" - [697] "references.00.publisher.date-start" - [698] "references.00.publisher.date-end" + [697] "references.00.publisher.date_start" + [698] "references.00.publisher.date_end" [699] "references.00.publisher.location" [700] "references.00.recipients.00.family_names" [701] "references.00.recipients.00.given_names" diff --git a/tests/testthat/_snaps/parse_dependencies.md b/tests/testthat/_snaps/parse_dependencies.md deleted file mode 100644 index 3c758832..00000000 --- a/tests/testthat/_snaps/parse_dependencies.md +++ /dev/null @@ -1,41 +0,0 @@ -# Check dependencies - - Code - print(selected) - Output - - title: 'R: A Language and Environment for Statistical Computing' - url: https://www.R-project.org/ - - title: cli - url: https://cli.r-lib.org - repository: https://CRAN.R-project.org/package=cli - - title: desc - url: https://desc.r-lib.org/ - repository: https://CRAN.R-project.org/package=desc - - title: jsonlite - url: https://jeroen.r-universe.dev/jsonlite - repository: https://CRAN.R-project.org/package=jsonlite - - title: jsonvalidate - url: https://docs.ropensci.org/jsonvalidate/ - repository: 
https://CRAN.R-project.org/package=jsonvalidate - - title: yaml - url: https://github.com/vubiostat/r-yaml/ - repository: https://CRAN.R-project.org/package=yaml - - title: bibtex - url: https://docs.ropensci.org/bibtex/ - repository: https://CRAN.R-project.org/package=bibtex - - title: knitr - url: https://yihui.org/knitr/ - repository: https://CRAN.R-project.org/package=knitr - - title: lifecycle - url: https://lifecycle.r-lib.org/ - repository: https://CRAN.R-project.org/package=lifecycle - - title: rmarkdown - url: https://pkgs.rstudio.com/rmarkdown/ - repository: https://CRAN.R-project.org/package=rmarkdown - - title: testthat - url: https://testthat.r-lib.org - repository: https://CRAN.R-project.org/package=testthat - - title: usethis - url: https://usethis.r-lib.org - repository: https://CRAN.R-project.org/package=usethis - diff --git a/tests/testthat/_snaps/utils-cff_ref.md b/tests/testthat/_snaps/utils-cff_ref.md new file mode 100644 index 00000000..350bc528 --- /dev/null +++ b/tests/testthat/_snaps/utils-cff_ref.md @@ -0,0 +1,9 @@ +# fallback_dates + + Code + as_cff(p2) + Output + date-published: '2025-09-25' + month: '9' + year: '2025' + diff --git a/tests/testthat/_snaps/merge_desc_cit.md b/tests/testthat/_snaps/utils-create.md similarity index 95% rename from tests/testthat/_snaps/merge_desc_cit.md rename to tests/testthat/_snaps/utils-create.md index 28dc3cec..906fbdb4 100644 --- a/tests/testthat/_snaps/merge_desc_cit.md +++ b/tests/testthat/_snaps/utils-create.md @@ -1067,3 +1067,44 @@ isbn: 978-3-319-24277-4 url: https://ggplot2.tidyverse.org +# Check dependencies + + Code + print(selected) + Output + - title: 'R: A Language and Environment for Statistical Computing' + url: https://www.R-project.org/ + - title: cli + url: https://cli.r-lib.org + repository: https://CRAN.R-project.org/package=cli + - title: desc + url: https://desc.r-lib.org/ + repository: https://CRAN.R-project.org/package=desc + - title: jsonlite + url: https://jeroen.r-universe.dev/jsonlite + repository: https://CRAN.R-project.org/package=jsonlite + - title: jsonvalidate + url: https://docs.ropensci.org/jsonvalidate/ + repository: https://CRAN.R-project.org/package=jsonvalidate + - title: yaml + url: https://github.com/vubiostat/r-yaml/ + repository: https://CRAN.R-project.org/package=yaml + - title: bibtex + url: https://docs.ropensci.org/bibtex/ + repository: https://CRAN.R-project.org/package=bibtex + - title: knitr + url: https://yihui.org/knitr/ + repository: https://CRAN.R-project.org/package=knitr + - title: lifecycle + url: https://lifecycle.r-lib.org/ + repository: https://CRAN.R-project.org/package=lifecycle + - title: rmarkdown + url: https://pkgs.rstudio.com/rmarkdown/ + repository: https://CRAN.R-project.org/package=rmarkdown + - title: testthat + url: https://testthat.r-lib.org + repository: https://CRAN.R-project.org/package=testthat + - title: usethis + url: https://usethis.r-lib.org + repository: https://CRAN.R-project.org/package=usethis + diff --git a/tests/testthat/test-cff_read.R b/tests/testthat/test-cff_read.R index 753d7de7..4eaac8b3 100644 --- a/tests/testthat/test-cff_read.R +++ b/tests/testthat/test-cff_read.R @@ -51,6 +51,17 @@ test_that("cff_read DESCRIPTION", { ) expect_identical(f1_1, f2_1) + + skip_on_cran() + # With gh keywords + f <- system.file("examples/DESCRIPTION_posit_package_manager", + package = "cffr" + ) + fno <- cff_read_description(f, gh_keywords = FALSE) + f2 <- cff_read_description(f, gh_keywords = TRUE) + + expect_false(is.null(f2$keywords)) + 
expect_gt(length(f2$keywords), length(fno$keywords)) }) @@ -204,8 +215,35 @@ test_that("Corrupt CITATION", { # Internal - desc_path <- system.file("x", package = "cffr") + desc_path <- system.file("examples/DESCRIPTION_basic", + package = "cffr" + ) expect_silent(anull <- cff_safe_read_citation(desc_path = desc_path, tmp)) expect_null(anull) }) + +test_that("Creating cff from packages encoded in latin1", { + # Surveillance package + desc_path <- system.file("examples/DESCRIPTION_surveillance", + package = "cffr" + ) + cit_path <- system.file("examples/CITATION_surveillance", package = "cffr") + + expect_true(desc::desc(desc_path)$get("Encoding") == "latin1") + + # Parse citation + bib <- cff_safe_read_citation(desc_path, cit_path) + + expect_true("UTF-8" %in% Encoding(unlist(bib))) + expect_false("latin1" %in% Encoding(unlist(bib))) + + # Create cff + cffobj <- cff_create(desc_path, keys = list( + references = bib + )) + + expect_s3_class(cffobj, "cff") + expect_snapshot(cffobj) + expect_true(cff_validate(cffobj, verbose = FALSE)) +}) diff --git a/tests/testthat/test-encoding.R b/tests/testthat/test-encoding.R deleted file mode 100644 index 243f7b49..00000000 --- a/tests/testthat/test-encoding.R +++ /dev/null @@ -1,24 +0,0 @@ -test_that("Creating cff from packages encoded in latin1", { - # Surveillance package - desc_path <- system.file("examples/DESCRIPTION_surveillance", - package = "cffr" - ) - cit_path <- system.file("examples/CITATION_surveillance", package = "cffr") - - expect_true(desc::desc(desc_path)$get("Encoding") == "latin1") - - # Parse citation - bib <- cff_safe_read_citation(desc_path, cit_path) - - expect_true("UTF-8" %in% Encoding(unlist(bib))) - expect_false("latin1" %in% Encoding(unlist(bib))) - - # Create cff - cffobj <- cff_create(desc_path, keys = list( - references = bib - )) - - expect_s3_class(cffobj, "cff") - expect_snapshot(cffobj) - expect_true(cff_validate(cffobj, verbose = FALSE)) -}) diff --git a/tests/testthat/test-methods.R b/tests/testthat/test-methods.R index aea7b623..a9e16f55 100644 --- a/tests/testthat/test-methods.R +++ b/tests/testthat/test-methods.R @@ -197,7 +197,6 @@ test_that("as.person method names and particles", { pers_bib <- toBibtex(as.person(cf)) again <- as_cff_person(pers_bib) expect_identical(cf, again) - }) test_that("Errors on other as.person methods", { diff --git a/tests/testthat/test-parse_dependencies.R b/tests/testthat/test-parse_dependencies.R deleted file mode 100644 index b4b46975..00000000 --- a/tests/testthat/test-parse_dependencies.R +++ /dev/null @@ -1,13 +0,0 @@ -test_that("Check dependencies", { - skip_on_cran() - deps <- parse_dependencies(system.file("DESCRIPTION", package = "cffr")) - - # Extract selected fields - selected <- lapply(deps, function(x) { - y <- x[names(x) %in% c("title", "url", "repository")] - return(y) - }) - - class(selected) <- "cff" - expect_snapshot(print(selected)) -}) diff --git a/tests/testthat/test-utils-bib.R b/tests/testthat/test-utils-bib.R new file mode 100644 index 00000000..b6c7311a --- /dev/null +++ b/tests/testthat/test-utils-bib.R @@ -0,0 +1,8 @@ +test_that("get_bib_month", { + x <- list(month = "1") + expect_identical(get_bib_month(x), "jan") + + x <- list("date-published" = "2027-02-10") + + expect_identical(get_bib_month(x), "feb") +}) diff --git a/tests/testthat/test-parse_desc_license.R b/tests/testthat/test-utils-cff_create.R similarity index 100% rename from tests/testthat/test-parse_desc_license.R rename to tests/testthat/test-utils-cff_create.R diff --git 
a/tests/testthat/test-utils-cff_ref.R b/tests/testthat/test-utils-cff_ref.R new file mode 100644 index 00000000..138a45ce --- /dev/null +++ b/tests/testthat/test-utils-cff_ref.R @@ -0,0 +1,8 @@ +test_that("fallback_dates", { + xinit <- list(year = "2025") + p <- fallback_dates(list(year = "2025")) + expect_identical(xinit, p) + + p2 <- fallback_dates(list("date-published" = "2025-09-25")) + expect_snapshot(as_cff(p2)) +}) diff --git a/tests/testthat/test-merge_desc_cit.R b/tests/testthat/test-utils-create.R similarity index 64% rename from tests/testthat/test-merge_desc_cit.R rename to tests/testthat/test-utils-create.R index 44040d83..b69c295c 100644 --- a/tests/testthat/test-merge_desc_cit.R +++ b/tests/testthat/test-utils-create.R @@ -18,3 +18,17 @@ test_that("Merge all DESCRIPTION files with CITATION_basic", { expect_true(cff_validate(merged, verbose = FALSE)) } }) + +test_that("Check dependencies", { + skip_on_cran() + deps <- get_dependencies(system.file("DESCRIPTION", package = "cffr")) + + # Extract selected fields + selected <- lapply(deps, function(x) { + y <- x[names(x) %in% c("title", "url", "repository")] + return(y) + }) + + class(selected) <- "cff" + expect_snapshot(print(selected)) +})
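
For reference, a minimal standalone sketch of the name-particle / name-suffix handling introduced in R/utils-methods.R above. The clean_str() stand-in below is a simplified assumption (the real helper is internal to cffr); the rest relies only on base R's utils::person().

    # Simplified stand-in for cffr's internal clean_str(): trims whitespace
    # and collapses empty values to NULL (assumption, not the real helper).
    clean_str <- function(x) {
      x <- trimws(paste(x, collapse = " "))
      if (!nzchar(x)) NULL else x
    }

    x <- list(
      "given-names"   = "P. H.",
      "name-particle" = "von",
      "family-names"  = "Wicksteed",
      "name-suffix"   = "III"
    )

    # Particle and family name are joined with a space ...
    family <- paste(
      clean_str(x[["name-particle"]]),
      clean_str(x[["family-names"]])
    )
    # ... and the suffix is appended after a comma, so a person() built this
    # way renders as "von Wicksteed, III, P. H." in BibTeX author fields.
    family <- paste0(c(family, clean_str(x[["name-suffix"]])), collapse = ", ")

    p <- utils::person(given = clean_str(x[["given-names"]]), family = family)
    format(p) # "P. H. von Wicksteed, III"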