From 957d73707d86fb7c7a0f89cc34059a3a40d7e83b Mon Sep 17 00:00:00 2001 From: Awa Synthia Date: Sat, 5 Oct 2024 08:12:54 +0300 Subject: [PATCH 01/41] Add parameter definitions to acc2lin.R Signed-off-by: Awa Synthia --- R/acc2lin.R | 36 ++++++++++++++++++++++++------------ man/acc2lin.Rd | 3 ++- man/add_lins.Rd | 18 +++++++++++++++++- man/efetch_ipg.Rd | 2 +- man/ipg2lin.Rd | 3 ++- 5 files changed, 46 insertions(+), 16 deletions(-) diff --git a/R/acc2lin.R b/R/acc2lin.R index f8d71949..8570677e 100644 --- a/R/acc2lin.R +++ b/R/acc2lin.R @@ -25,18 +25,25 @@ sink.reset <- function() { #' Add Lineages #' -#' @param df -#' @param acc_col -#' @param assembly_path -#' @param lineagelookup_path -#' @param ipgout_path -#' @param plan +#' @param df A `data.frame` containing the input data. One column must contain +#' the accession numbers. +#' @param acc_col A string specifying the column name in `df` that holds the +#' accession numbers. Defaults to `"AccNum"`. +#' @param assembly_path A string specifying the path to the `assembly_summary.txt` +#' file. This file contains metadata about assemblies. +#' @param lineagelookup_path A string specifying the path to the lineage lookup +#' file, which contains a mapping from tax IDs to their corresponding lineages. +#' @param ipgout_path (Optional) A string specifying the path where IPG database +#' fetch results will be saved. If `NULL`, the results are not written to a file. +#' @param plan A string specifying the parallelization strategy for the future +#' package, such as `"sequential"` or `"multisession"`. #' #' @importFrom dplyr pull #' @importFrom magrittr %>% #' @importFrom rlang sym #' -#' @return Describe return, in detail +#' @return A `data.frame` that combines the original `df` with the lineage +#' information. #' @export #' #' @examples @@ -78,9 +85,12 @@ add_lins <- function(df, acc_col = "AccNum", assembly_path, #' (taxid to lineage mapping). This file can be generated using the #' @param ipgout_path Path to write the results of the efetch run of the accessions #' on the ipg database. If NULL, the file will not be written. Defaults to NULL -#' @param plan +#' @param plan A string specifying the parallelization strategy for the future +#' package, such as `"sequential"` or `"multisession"`. #' -#' @return Describe return, in detail +#' @return A `data.table` that contains the lineage information, mapping protein +#' accessions to their tax IDs and lineages. +#' @export #' @export #' #' @examples @@ -112,13 +122,14 @@ acc2lin <- function(accessions, assembly_path, lineagelookup_path, ipgout_path = #' @param accnums Character vector containing the accession numbers to query on #' the ipg database #' @param out_path Path to write the efetch results to -#' @param plan +#' @param plan A string specifying the parallelization strategy for the future +#' package, such as `"sequential"` or `"multisession"`. #' #' @importFrom furrr future_map #' @importFrom future plan #' @importFrom rentrez entrez_fetch #' -#' @return Describe return, in detail +#' @return No return value. The function writes the fetched results to `out_path`. #' @export #' #' @examples @@ -186,7 +197,8 @@ efetch_ipg <- function(accnums, out_path, plan = "sequential") { #' #' @importFrom data.table fread #' -#' @return Describe return, in detail +#' @return A `data.table` with the lineage information for the provided protein +#' accessions. 
#' @export #' #' @examples diff --git a/man/acc2lin.Rd b/man/acc2lin.Rd index 6255b290..1d9e6854 100644 --- a/man/acc2lin.Rd +++ b/man/acc2lin.Rd @@ -35,7 +35,8 @@ on the ipg database. If NULL, the file will not be written. Defaults to NULL} \item{plan}{} } \value{ -Describe return, in detail +A \code{data.table} that contains the lineage information, mapping protein +accessions to their tax IDs and lineages. } \description{ This function combines 'efetch_ipg()' and 'ipg2lin()' to map a set diff --git a/man/add_lins.Rd b/man/add_lins.Rd index 226e428d..cbf9752b 100644 --- a/man/add_lins.Rd +++ b/man/add_lins.Rd @@ -23,10 +23,26 @@ add_lins( ) } \arguments{ +\item{df}{A \code{data.frame} containing the input data. One column must contain +the accession numbers.} + +\item{acc_col}{A string specifying the column name in \code{df} that holds the +accession numbers. Defaults to \code{"AccNum"}.} + +\item{assembly_path}{A string specifying the path to the \code{assembly_summary.txt} +file. This file contains metadata about assemblies.} + +\item{lineagelookup_path}{A string specifying the path to the lineage lookup +file, which contains a mapping from tax IDs to their corresponding lineages.} + +\item{ipgout_path}{(Optional) A string specifying the path where IPG database +fetch results will be saved. If \code{NULL}, the results are not written to a file.} + \item{plan}{} } \value{ -Describe return, in detail +A \code{data.frame} that combines the original \code{df} with the lineage +information. } \description{ Add Lineages diff --git a/man/efetch_ipg.Rd b/man/efetch_ipg.Rd index ec5b6bcb..dc3fef18 100644 --- a/man/efetch_ipg.Rd +++ b/man/efetch_ipg.Rd @@ -20,7 +20,7 @@ the ipg database} the ipg database} } \value{ -Describe return, in detail +No return value. The function writes the fetched results to \code{out_path}. } \description{ Perform efetch on the ipg database and write the results to out_path diff --git a/man/ipg2lin.Rd b/man/ipg2lin.Rd index 3a14eada..03240179 100644 --- a/man/ipg2lin.Rd +++ b/man/ipg2lin.Rd @@ -35,7 +35,8 @@ file} This file can be generated using the "DownloadAssemblySummary()" function} } \value{ -Describe return, in detail +A \code{data.table} with the lineage information for the provided protein +accessions. } \description{ Takes the resulting file of an efetch run on the ipg database and From 323a06f69109f0e9369df5879bf91f14b887001d Mon Sep 17 00:00:00 2001 From: Awa Synthia Date: Sat, 5 Oct 2024 08:33:38 +0300 Subject: [PATCH 02/41] Add parameter definitions to tree.R Signed-off-by: Awa Synthia --- R/tree.R | 38 ++++++++++++++++++++++++++++---------- man/convert_fa2tre.Rd | 20 +++++++++++++++++++- man/generate_fa2tre.Rd | 12 +++++++++--- man/generate_trees.Rd | 12 +++++++++++- 4 files changed, 67 insertions(+), 15 deletions(-) diff --git a/R/tree.R b/R/tree.R index 01e9ead5..a4501b7d 100755 --- a/R/tree.R +++ b/R/tree.R @@ -37,14 +37,23 @@ ## !! FastTree will only work if there are unique sequence names!! #' convert_fa2tre #' -#' @param fa_path -#' @param tre_path -#' @param fasttree_path +#' @param fa_path Path to the input FASTA alignment file (.fa). Default is the +#' path to "data/alns/pspa_snf7.fa". +#' @param tre_path Path to the output file where the generated tree (.tre) will +#' be saved. Default is the path to "data/alns/pspa_snf7.tre". +#' @param fasttree_path Path to the FastTree executable, which is used to +#' generate the phylogenetic tree. Default is "src/FastTree". #' -#' @return +#' @return No return value. 
The function generates a tree file (.tre) from the +#' input FASTA file. #' @export #' #' @examples +#' \dontrun{ +#' convert_fa2tre(here("data/alns/pspa_snf7.fa"), +#' here("data/alns/pspa_snf7.tre"), +#' here("src/FastTree") +#' } convert_fa2tre <- function(fa_path = here("data/alns/pspa_snf7.fa"), tre_path = here("data/alns/pspa_snf7.tre"), fasttree_path = here("src/FastTree")) { @@ -72,16 +81,22 @@ convert_fa2tre <- function(fa_path = here("data/alns/pspa_snf7.fa"), #' @description #' Generate Trees for ALL fasta files in "data/alns" #' -#' @param aln_path +#' @param aln_path Path to the directory containing all the alignment FASTA +#' files (.fa) for which trees will be generated. Default is "data/alns/". +#' #' #' @importFrom here here #' @importFrom purrr pmap #' @importFrom stringr str_replace_all #' -#' @return +#' @return No return value. The function generates tree files (.tre) for each +#' alignment file in the specified directory. #' @export #' #' @examples +#' \dontrun{ +#' generate_trees(here("data/alns/")) +#' } generate_trees <- function(aln_path = here("data/alns/")) { # finding all fasta alignment files fa_filenames <- list.files(path = aln_path, pattern = "*.fa") @@ -111,16 +126,19 @@ generate_trees <- function(aln_path = here("data/alns/")) { #' @description #' Generating phylogenetic tree from alignment file '.fa' #' -#' @param fa_file Character. Path to file. -#' Default is 'pspa_snf7.fa' -#' @param out_file +#' @param fa_file Character. Path to the alignment FASTA file (.fa) from which +#' the phylogenetic tree will be generated. Default is 'pspa_snf7.fa'. +#' @param out_file Path to the output file where the generated tree (.tre) will +#' be saved. Default is "data/alns/pspa_snf7.tre". #' #' @importFrom ape write.tree #' @importFrom phangorn bootstrap.pml dist.ml NJ modelTest phyDat plotBS pml pml.control pratchet optim.parsimony optim.pml read.phyDat upgma #' @importFrom seqinr dist.alignment read.alignment #' @importFrom stats logLik #' -#' @return +#' @return No return value. The function generates a phylogenetic tree file +#' (.tre) based on different approaches like Neighbor Joining, UPGMA, and +#' Maximum Likelihood. #' @export #' #' @details The alignment file would need two columns: 1. accession + diff --git a/man/convert_fa2tre.Rd b/man/convert_fa2tre.Rd index 87c59d67..e86ea812 100644 --- a/man/convert_fa2tre.Rd +++ b/man/convert_fa2tre.Rd @@ -11,8 +11,26 @@ convert_fa2tre( ) } \arguments{ -\item{fasttree_path}{} +\item{fa_path}{Path to the input FASTA alignment file (.fa). Default is the +path to "data/alns/pspa_snf7.fa".} + +\item{tre_path}{Path to the output file where the generated tree (.tre) will +be saved. Default is the path to "data/alns/pspa_snf7.tre".} + +\item{fasttree_path}{Path to the FastTree executable, which is used to +generate the phylogenetic tree. Default is "src/FastTree".} +} +\value{ +No return value. The function generates a tree file (.tre) from the +input FASTA file. } \description{ convert_fa2tre } +\examples{ +\dontrun{ +convert_fa2tre(here("data/alns/pspa_snf7.fa"), + here("data/alns/pspa_snf7.tre"), + here("src/FastTree") +} +} diff --git a/man/generate_fa2tre.Rd b/man/generate_fa2tre.Rd index b70848bb..ca7bce5e 100644 --- a/man/generate_fa2tre.Rd +++ b/man/generate_fa2tre.Rd @@ -10,10 +10,16 @@ generate_fa2tre( ) } \arguments{ -\item{fa_file}{Character. Path to file. -Default is 'pspa_snf7.fa'} +\item{fa_file}{Character. Path to the alignment FASTA file (.fa) from which +the phylogenetic tree will be generated. 
Default is 'pspa_snf7.fa'.} -\item{out_file}{} +\item{out_file}{Path to the output file where the generated tree (.tre) will +be saved. Default is "data/alns/pspa_snf7.tre".} +} +\value{ +No return value. The function generates a phylogenetic tree file +(.tre) based on different approaches like Neighbor Joining, UPGMA, and +Maximum Likelihood. } \description{ Generating phylogenetic tree from alignment file '.fa' diff --git a/man/generate_trees.Rd b/man/generate_trees.Rd index 43bd7243..0f1180ea 100644 --- a/man/generate_trees.Rd +++ b/man/generate_trees.Rd @@ -7,8 +7,18 @@ generate_trees(aln_path = here("data/alns/")) } \arguments{ -\item{aln_path}{} +\item{aln_path}{Path to the directory containing all the alignment FASTA +files (.fa) for which trees will be generated. Default is "data/alns/".} +} +\value{ +No return value. The function generates tree files (.tre) for each +alignment file in the specified directory. } \description{ Generate Trees for ALL fasta files in "data/alns" } +\examples{ +\dontrun{ +generate_trees(here("data/alns/")) +} +} From 2d9acb8b335a9dc281b7a4b460706e90e1a3d218 Mon Sep 17 00:00:00 2001 From: Awa Synthia Date: Sat, 5 Oct 2024 08:58:46 +0300 Subject: [PATCH 03/41] Add parameter definitions to summarize.R Signed-off-by: Awa Synthia --- NAMESPACE | 1 - R/summarize.R | 157 +++++++++++++++++++++++++++++++---------- man/count_bycol.Rd | 22 +++++- man/elements2words.Rd | 23 ++++-- man/filter_freq.Rd | 10 ++- man/summ.DA.Rd | 13 +++- man/summ.DA.byLin.Rd | 9 ++- man/summ.GC.Rd | 14 +++- man/summ.GC.byDALin.Rd | 15 +++- man/summarize_bylin.Rd | 15 +++- man/total_counts.Rd | 24 +++++-- man/words2wc.Rd | 11 ++- 12 files changed, 249 insertions(+), 65 deletions(-) diff --git a/NAMESPACE b/NAMESPACE index 16cf0813..9d73120a 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -131,7 +131,6 @@ importFrom(dplyr,if_else) importFrom(dplyr,left_join) importFrom(dplyr,mutate) importFrom(dplyr,n) -importFrom(dplyr,n_distinct) importFrom(dplyr,pull) importFrom(dplyr,relocate) importFrom(dplyr,right_join) diff --git a/R/summarize.R b/R/summarize.R index a9b13e43..e03ca463 100644 --- a/R/summarize.R +++ b/R/summarize.R @@ -91,18 +91,31 @@ filter_by_doms <- function(prot, column = "DomArch", doms_keep = c(), doms_remov ## Function to obtain element counts (DA, GC) #' Count Bycol #' -#' @param prot -#' @param column -#' @param min.freq +#' @param prot A data frame containing the dataset to analyze, typically with +#' multiple columns including the one specified by the `column` parameter. +#' @param column A character string specifying the name of the column to analyze. +#' The default is "DomArch". +#' @param min.freq An integer specifying the minimum frequency an element must +#' have to be included in the output. Default is 1. #' #' @importFrom dplyr arrange as_tibble filter select #' -#' @return Describe return, in detail +#' @return A tibble with two columns: +#' \describe{ +#' \item{`column`}{The unique elements from the specified column +#' (e.g., "DomArch").} +#' \item{`freq`}{The frequency of each element, i.e., the number of times +#' each element appears in the specified column.} +#' } +#' The tibble is filtered to only include elements that have a frequency +#' greater than or equal to `min.freq` and does not include elements with `NA` +#' values or those starting with a hyphen ("-"). 
+#' #' @export #' #' @examples #' \dontrun{ -#' count_bycol() +#' count_bycol(prot = my_data, column = "DomArch", min.freq = 10) #' } count_bycol <- function(prot = prot, column = "DomArch", min.freq = 1) { counts <- prot %>% @@ -123,19 +136,30 @@ count_bycol <- function(prot = prot, column = "DomArch", min.freq = 1) { #' Break string ELEMENTS into WORDS for domain architecture (DA) and genomic #' context (GC) #' -#' @param prot [dataframe] -#' @param column [string] column name -#' @param conversion_type [string] type of conversion: 'da2doms': domain architectures to -#' domains. 'gc2da' genomic context to domain architectures +#' @param prot A dataframe containing the dataset to analyze. The specified +#' `column` contains the string elements to be processed. +#' @param column A character string specifying the name of the column to analyze. +#' Default is "DomArch". +#' @param conversion_type A character string specifying the type of conversion. +#' Two options are available: +#' \describe{ +#' \item{`da2doms`}{Convert domain architectures into individual domains by +#' replacing `+` symbols with spaces.} +#' \item{`gc2da`}{Convert genomic context into domain architectures by +#' replacing directional symbols (`<-`, `->`, and `|`) with spaces.} +#' } #' #' @importFrom dplyr pull #' @importFrom stringr str_replace_all #' -#' @return [string] with words delimited by spaces +#' @return A single string where elements are delimited by spaces. The function +#' performs necessary substitutions based on the `conversion_type` and cleans up +#' extraneous characters like newlines, tabs, and multiple spaces. #' #' @examples #' \dontrun{ -#' tibble::tibble(DomArch = c("aaa+bbb", "a+b", "b+c", "b-c")) |> elements2words() +#' tibble::tibble(DomArch = c("aaa+bbb", +#' "a+b", "b+c", "b-c")) |> elements2words() #' } #' elements2words <- function(prot, column = "DomArch", conversion_type = "da2doms") { @@ -175,11 +199,19 @@ elements2words <- function(prot, column = "DomArch", conversion_type = "da2doms" #' @description #' Get word counts (wc) [DOMAINS (DA) or DOMAIN ARCHITECTURES (GC)] #' -#' @param string +#' @param string A character string containing the elements (words) to count. +#' This would typically be a space-delimited string representing domain +#' architectures or genomic contexts. #' -#' @importFrom dplyr as_tibble filter +#' @importFrom dplyr as_tibble filter arrange +#' @importFrom stringr str_replace_all #' -#' @return [tbl_df] table with 2 columns: 1) words & 2) counts/frequency +#' @return A tibble (tbl_df) with two columns: +#' \describe{ +#' \item{`words`}{A column containing the individual words +#' (domains or domain architectures).} +#' \item{`freq`}{A column containing the frequency counts for each word.} +#' } #' #' @examples #' \dontrun{ @@ -219,10 +251,15 @@ words2wc <- function(string) { ## Function to filter based on frequencies #' Filter Frequency #' -#' @param x -#' @param min.freq +#' @param x A tibble (tbl_df) containing at least two columns: one for +#' elements (e.g., `words`) and one for their frequency (e.g., `freq`). +#' @param min.freq A numeric value specifying the minimum frequency threshold. +#' Only elements with frequencies greater than or equal to this value will be +#' retained. +#' +#' @return A tibble with the same structure as `x`, but filtered to include +#' only rows where the frequency is greater than or equal to `min.freq`. 
#' -#' @return Describe return, in detail #' @export #' #' @examples @@ -239,15 +276,20 @@ filter_freq <- function(x, min.freq) { ######################### #' Summarize by Lineage #' -#' @param prot -#' @param column -#' @param by -#' @param query +#' @param prot A dataframe or tibble containing the data. +#' @param column A string representing the column to be summarized +#' (e.g., `DomArch`). Default is "DomArch". +#' @param by A string representing the grouping column (e.g., `Lineage`). +#' Default is "Lineage". +#' @param query A string specifying the query pattern for filtering the target +#' column. Use "all" to skip filtering and include all rows. #' #' @importFrom dplyr arrange filter group_by summarise #' @importFrom rlang sym #' -#' @return Describe return, in detail +#' @return A tibble summarizing the counts of occurrences of elements in +#' the `column`, grouped by the `by` column. The result includes the number +#' of occurrences (`count`) and is arranged in descending order of count. #' @export #' #' @examples @@ -283,11 +325,17 @@ summarize_bylin <- function(prot = "prot", column = "DomArch", by = "Lineage", #' Function to summarize and retrieve counts by Domains & Domains+Lineage #' #' -#' @param x +#' @param x A dataframe or tibble containing the data. It must have columns +#' named `DomArch` and `Lineage`. #' #' @importFrom dplyr arrange count desc filter group_by summarise #' -#' @return Describe return, in detail +#' @return A tibble summarizing the counts of unique domain architectures +#' (`DomArch`) per lineage (`Lineage`). The resulting table contains three +#' columns: `DomArch`, `Lineage`, and `count`, which indicates the frequency +#' of each domain architecture for each lineage. The results are arranged in +#' descending order of `count`. +#' #' @export #' #' @examples @@ -309,11 +357,18 @@ summ.DA.byLin <- function(x) { #' @description #' Function to retrieve counts of how many lineages a DomArch appears in #' -#' @param x +#' @param x A dataframe or tibble containing the data. It must have a column +#' named `DomArch` and a count column, such as `count`, which represents the +#' occurrences of each architecture in various lineages. #' #' @importFrom dplyr arrange group_by filter summarise #' -#' @return Describe return, in detail +#' @return A tibble summarizing each unique `DomArch`, along with the following +#' columns: +#' - `totalcount`: The total occurrences of each `DomArch` across all lineages. +#' - `totallin`: The total number of unique lineages in which each `DomArch` +#' appears. +#' The results are arranged in descending order of `totallin` and `totalcount`. #' @export #' #' @examples @@ -332,11 +387,20 @@ summ.DA <- function(x) { #' summ.GC.byDALin #' -#' @param x +#' @param x A dataframe or tibble containing the data. It must have columns +#' named `GenContext`, `DomArch`, and `Lineage`. #' #' @importFrom dplyr arrange desc filter group_by n summarise #' -#' @return Define return, in detail +#' @return A tibble summarizing each unique combination of `GenContext`, +#' `DomArch`, and `Lineage`, along with the following columns: +#' - `GenContext`: The genomic context for each entry. +#' - `DomArch`: The domain architecture for each entry. +#' - `Lineage`: The lineage associated with each entry. +#' - `count`: The total number of occurrences for each combination of +#' `GenContext`, `DomArch`, and `Lineage`. +#' +#' The results are arranged in descending order of `count`. 
#' @export #' #' @examples @@ -382,11 +446,19 @@ summ.GC.byLin <- function(x) { #' summ.GC #' -#' @param x +#' @param x A dataframe or tibble containing the data. It must have columns +#' named `GenContext`, `DomArch`, and `Lineage`. #' -#' @importFrom dplyr arrange desc filter group_by n_distinct summarise +#' @importFrom dplyr arrange desc filter group_by n summarise #' -#' @return Describe return, in detail +#' @return A tibble summarizing each unique combination of `GenContext` and +#' `Lineage`, along with the following columns: +#' - `GenContext`: The genomic context for each entry. +#' - `Lineage`: The lineage associated with each entry. +#' - `count`: The total number of occurrences for each combination of +#' `GenContext` and `Lineage`. +#' +#' The results are arranged in descending order of `count`. #' @export #' #' @examples @@ -419,16 +491,27 @@ summ.GC <- function(x) { #' #' @param prot A data frame that must contain columns: #' \itemize{\item Either 'GenContext' or 'DomArch.norep' \item count} -#' @param column Character. The column to summarize -#' @param lineage_col -#' @param cutoff Numeric. Cutoff for total count. Counts below cutoff value will not be shown. Default is 0. -#' @param RowsCutoff -#' @param digits +#' @param column Character. The column to summarize, default is "DomArch". +#' @param lineage_col Character. The name of the lineage column, default is +#' "Lineage". +#' @param cutoff Numeric. Cutoff for total count. Counts below this cutoff value +#' will not be shown. Default is 0. +#' @param RowsCutoff Logical. If TRUE, filters based on cumulative percentage +#' cutoff. Default is FALSE. +#' @param digits Numeric. Number of decimal places for percentage columns. +#' Default is 2. +#' #' #' @importFrom dplyr arrange distinct filter group_by left_join mutate select summarise ungroup #' @importFrom rlang as_string sym #' -#' @return Define return, in detail +#' @return A data frame with the following columns: +#' - `{{ column }}`: Unique values from the specified column. +#' - `totalcount`: The total count of occurrences for each unique value in +#' the specified column. +#' - `IndividualCountPercent`: The percentage of each `totalcount` relative to +#' the overall count. +#' - `CumulativePercent`: The cumulative percentage of total counts. #' @export #' #' @note Please refer to the source code if you have alternate file formats and/or diff --git a/man/count_bycol.Rd b/man/count_bycol.Rd index 884c0f0f..946a7ea2 100644 --- a/man/count_bycol.Rd +++ b/man/count_bycol.Rd @@ -7,16 +7,32 @@ count_bycol(prot = prot, column = "DomArch", min.freq = 1) } \arguments{ -\item{min.freq}{} +\item{prot}{A data frame containing the dataset to analyze, typically with +multiple columns including the one specified by the \code{column} parameter.} + +\item{column}{A character string specifying the name of the column to analyze. +The default is "DomArch".} + +\item{min.freq}{An integer specifying the minimum frequency an element must +have to be included in the output. Default is 1.} } \value{ -Describe return, in detail +A tibble with two columns: +\describe{ +\item{\code{column}}{The unique elements from the specified column +(e.g., "DomArch").} +\item{\code{freq}}{The frequency of each element, i.e., the number of times +each element appears in the specified column.} +} +The tibble is filtered to only include elements that have a frequency +greater than or equal to \code{min.freq} and does not include elements with \code{NA} +values or those starting with a hyphen ("-"). 
} \description{ Count Bycol } \examples{ \dontrun{ -count_bycol() +count_bycol(prot = my_data, column = "DomArch", min.freq = 10) } } diff --git a/man/elements2words.Rd b/man/elements2words.Rd index 80fcbafb..bda447db 100644 --- a/man/elements2words.Rd +++ b/man/elements2words.Rd @@ -7,15 +7,25 @@ elements2words(prot, column = "DomArch", conversion_type = "da2doms") } \arguments{ -\item{prot}{\link{dataframe}} +\item{prot}{A dataframe containing the dataset to analyze. The specified +\code{column} contains the string elements to be processed.} -\item{column}{\link{string} column name} +\item{column}{A character string specifying the name of the column to analyze. +Default is "DomArch".} -\item{conversion_type}{\link{string} type of conversion: 'da2doms': domain architectures to -domains. 'gc2da' genomic context to domain architectures} +\item{conversion_type}{A character string specifying the type of conversion. +Two options are available: +\describe{ +\item{\code{da2doms}}{Convert domain architectures into individual domains by +replacing \code{+} symbols with spaces.} +\item{\code{gc2da}}{Convert genomic context into domain architectures by +replacing directional symbols (\verb{<-}, \verb{->}, and \code{|}) with spaces.} +}} } \value{ -\link{string} with words delimited by spaces +A single string where elements are delimited by spaces. The function +performs necessary substitutions based on the \code{conversion_type} and cleans up +extraneous characters like newlines, tabs, and multiple spaces. } \description{ Break string ELEMENTS into WORDS for domain architecture (DA) and genomic @@ -23,7 +33,8 @@ context (GC) } \examples{ \dontrun{ -tibble::tibble(DomArch = c("aaa+bbb", "a+b", "b+c", "b-c")) |> elements2words() +tibble::tibble(DomArch = c("aaa+bbb", +"a+b", "b+c", "b-c")) |> elements2words() } } diff --git a/man/filter_freq.Rd b/man/filter_freq.Rd index ce4db5ac..9dfba73b 100644 --- a/man/filter_freq.Rd +++ b/man/filter_freq.Rd @@ -7,10 +7,16 @@ filter_freq(x, min.freq) } \arguments{ -\item{min.freq}{} +\item{x}{A tibble (tbl_df) containing at least two columns: one for +elements (e.g., \code{words}) and one for their frequency (e.g., \code{freq}).} + +\item{min.freq}{A numeric value specifying the minimum frequency threshold. +Only elements with frequencies greater than or equal to this value will be +retained.} } \value{ -Describe return, in detail +A tibble with the same structure as \code{x}, but filtered to include +only rows where the frequency is greater than or equal to \code{min.freq}. } \description{ Filter Frequency diff --git a/man/summ.DA.Rd b/man/summ.DA.Rd index 13717140..01d15b3c 100644 --- a/man/summ.DA.Rd +++ b/man/summ.DA.Rd @@ -7,10 +7,19 @@ summ.DA(x) } \arguments{ -\item{x}{} +\item{x}{A dataframe or tibble containing the data. It must have a column +named \code{DomArch} and a count column, such as \code{count}, which represents the +occurrences of each architecture in various lineages.} } \value{ -Describe return, in detail +A tibble summarizing each unique \code{DomArch}, along with the following +columns: +\itemize{ +\item \code{totalcount}: The total occurrences of each \code{DomArch} across all lineages. +\item \code{totallin}: The total number of unique lineages in which each \code{DomArch} +appears. +The results are arranged in descending order of \code{totallin} and \code{totalcount}. 
+} } \description{ Function to retrieve counts of how many lineages a DomArch appears in diff --git a/man/summ.DA.byLin.Rd b/man/summ.DA.byLin.Rd index 66555fd5..d88e5d37 100644 --- a/man/summ.DA.byLin.Rd +++ b/man/summ.DA.byLin.Rd @@ -7,10 +7,15 @@ summ.DA.byLin(x) } \arguments{ -\item{x}{} +\item{x}{A dataframe or tibble containing the data. It must have columns +named \code{DomArch} and \code{Lineage}.} } \value{ -Describe return, in detail +A tibble summarizing the counts of unique domain architectures +(\code{DomArch}) per lineage (\code{Lineage}). The resulting table contains three +columns: \code{DomArch}, \code{Lineage}, and \code{count}, which indicates the frequency +of each domain architecture for each lineage. The results are arranged in +descending order of \code{count}. } \description{ Function to summarize and retrieve counts by Domains & Domains+Lineage diff --git a/man/summ.GC.Rd b/man/summ.GC.Rd index fa52a6bf..2ec4d651 100644 --- a/man/summ.GC.Rd +++ b/man/summ.GC.Rd @@ -7,10 +7,20 @@ summ.GC(x) } \arguments{ -\item{x}{} +\item{x}{A dataframe or tibble containing the data. It must have columns +named \code{GenContext}, \code{DomArch}, and \code{Lineage}.} } \value{ -Describe return, in detail +A tibble summarizing each unique combination of \code{GenContext} and +\code{Lineage}, along with the following columns: +\itemize{ +\item \code{GenContext}: The genomic context for each entry. +\item \code{Lineage}: The lineage associated with each entry. +\item \code{count}: The total number of occurrences for each combination of +\code{GenContext} and \code{Lineage}. +} + +The results are arranged in descending order of \code{count}. } \description{ summ.GC diff --git a/man/summ.GC.byDALin.Rd b/man/summ.GC.byDALin.Rd index 34c9f84d..7fc8d443 100644 --- a/man/summ.GC.byDALin.Rd +++ b/man/summ.GC.byDALin.Rd @@ -7,10 +7,21 @@ summ.GC.byDALin(x) } \arguments{ -\item{x}{} +\item{x}{A dataframe or tibble containing the data. It must have columns +named \code{GenContext}, \code{DomArch}, and \code{Lineage}.} } \value{ -Define return, in detail +A tibble summarizing each unique combination of \code{GenContext}, +\code{DomArch}, and \code{Lineage}, along with the following columns: +\itemize{ +\item \code{GenContext}: The genomic context for each entry. +\item \code{DomArch}: The domain architecture for each entry. +\item \code{Lineage}: The lineage associated with each entry. +\item \code{count}: The total number of occurrences for each combination of +\code{GenContext}, \code{DomArch}, and \code{Lineage}. +} + +The results are arranged in descending order of \code{count}. } \description{ summ.GC.byDALin diff --git a/man/summarize_bylin.Rd b/man/summarize_bylin.Rd index a94c54c1..92b93652 100644 --- a/man/summarize_bylin.Rd +++ b/man/summarize_bylin.Rd @@ -7,10 +7,21 @@ summarize_bylin(prot = "prot", column = "DomArch", by = "Lineage", query) } \arguments{ -\item{query}{} +\item{prot}{A dataframe or tibble containing the data.} + +\item{column}{A string representing the column to be summarized +(e.g., \code{DomArch}). Default is "DomArch".} + +\item{by}{A string representing the grouping column (e.g., \code{Lineage}). +Default is "Lineage".} + +\item{query}{A string specifying the query pattern for filtering the target +column. Use "all" to skip filtering and include all rows.} } \value{ -Describe return, in detail +A tibble summarizing the counts of occurrences of elements in +the \code{column}, grouped by the \code{by} column. 
The result includes the number +of occurrences (\code{count}) and is arranged in descending order of count. } \description{ Summarize by Lineage diff --git a/man/total_counts.Rd b/man/total_counts.Rd index 49db8822..53d70096 100644 --- a/man/total_counts.Rd +++ b/man/total_counts.Rd @@ -17,14 +17,30 @@ total_counts( \item{prot}{A data frame that must contain columns: \itemize{\item Either 'GenContext' or 'DomArch.norep' \item count}} -\item{column}{Character. The column to summarize} +\item{column}{Character. The column to summarize, default is "DomArch".} -\item{cutoff}{Numeric. Cutoff for total count. Counts below cutoff value will not be shown. Default is 0.} +\item{lineage_col}{Character. The name of the lineage column, default is +"Lineage".} -\item{digits}{} +\item{cutoff}{Numeric. Cutoff for total count. Counts below this cutoff value +will not be shown. Default is 0.} + +\item{RowsCutoff}{Logical. If TRUE, filters based on cumulative percentage +cutoff. Default is FALSE.} + +\item{digits}{Numeric. Number of decimal places for percentage columns. +Default is 2.} } \value{ -Define return, in detail +A data frame with the following columns: +\itemize{ +\item \code{{{ column }}}: Unique values from the specified column. +\item \code{totalcount}: The total count of occurrences for each unique value in +the specified column. +\item \code{IndividualCountPercent}: The percentage of each \code{totalcount} relative to +the overall count. +\item \code{CumulativePercent}: The cumulative percentage of total counts. +} } \description{ Creates a data frame with a totalcount column diff --git a/man/words2wc.Rd b/man/words2wc.Rd index 1eba5dc4..69d006d5 100644 --- a/man/words2wc.Rd +++ b/man/words2wc.Rd @@ -7,10 +7,17 @@ words2wc(string) } \arguments{ -\item{string}{} +\item{string}{A character string containing the elements (words) to count. +This would typically be a space-delimited string representing domain +architectures or genomic contexts.} } \value{ -\link{tbl_df} table with 2 columns: 1) words & 2) counts/frequency +A tibble (tbl_df) with two columns: +\describe{ +\item{\code{words}}{A column containing the individual words +(domains or domain architectures).} +\item{\code{freq}}{A column containing the frequency counts for each word.} +} } \description{ Get word counts (wc) \link{DOMAINS (DA) or DOMAIN ARCHITECTURES (GC)} From 30d4bf3ab57c6296a81d6f792911c87586ca896e Mon Sep 17 00:00:00 2001 From: Seyi Kuforiji Date: Sat, 5 Oct 2024 12:29:37 +0100 Subject: [PATCH 04/41] usethis::pr_init("Implement error handling in acc2lin.R functions - Added validation checks for input parameters (accessions, ipg_file, assembly_path, lineagelookup_path). - Included error messages for missing or invalid inputs and file existence checks. - Wrapped main logic in tryCatch for graceful error handling during execution. ") --- R/acc2lin.R | 267 ++++++++++++++++++++++++++++++++++------------ man/acc2lin.Rd | 3 +- man/efetch_ipg.Rd | 3 +- man/ipg2lin.Rd | 3 +- man/sink.reset.Rd | 1 + 5 files changed, 207 insertions(+), 70 deletions(-) diff --git a/R/acc2lin.R b/R/acc2lin.R index f8d71949..dfb33da9 100644 --- a/R/acc2lin.R +++ b/R/acc2lin.R @@ -10,6 +10,8 @@ #' Sink Reset #' #' @return No return, but run to close all outstanding `sink()`s +#' and handles any errors or warnings that occur during the process. 
+#' #' @export #' #' @examples @@ -17,9 +19,19 @@ #' sink.reset() #' } sink.reset <- function() { + # Handle all errors and warnings + tryCatch({ for (i in seq_len(sink.number())) { - sink(NULL) + sink(NULL) } + print("All sinks closed") + }, error = function(e) { + print(paste("Error: ", e$message)) + }, warning = function(w) { + print(paste("Warning: ", w$message)) + }, finally = { + print("resetSink function execution completed.") + }) } @@ -44,23 +56,61 @@ sink.reset <- function() { #' add_lins() #' } add_lins <- function(df, acc_col = "AccNum", assembly_path, - lineagelookup_path, ipgout_path = NULL, plan = "sequential") { - s_acc_col <- sym(acc_col) - accessions <- df %>% pull(acc_col) - lins <- acc2lin(accessions, assembly_path, lineagelookup_path, ipgout_path, plan) + lineagelookup_path, ipgout_path = NULL, + plan = "sequential") { + # check for validate inputs + if (!is.data.frame(df)) { + stop("Input 'df' must be a data frame.") + } + + if (!acc_col %in% colnames(df)) { + stop(paste("Column", acc_col, "not found in data frame.")) + } + + # Ensure paths are character strings + if (!is.character(assembly_path) || !is.character(lineagelookup_path)) { + stop("Both 'assembly_path' and + 'lineagelookup_path' must be character strings.") + } + + # Ensure paths exist + if (!file.exists(assembly_path)) { + stop(paste("Assembly file not found at:", assembly_path)) + } - # Drop a lot of the unimportant columns for now? will make merging much easier - lins <- lins[, c( + if (!file.exists(lineagelookup_path)) { + stop(paste("Lineage lookup file not found at:", lineagelookup_path)) + } + tryCatch({ + # Attempt to add lineages + acc_col <- sym(acc_col) + accessions <- df %>% pull(acc_col) + lins <- acc2lin( + accessions, assembly_path, lineagelookup_path, ipgout_path, plan + ) + + # Drop a lot of the unimportant columns for now? + # will make merging much easier + lins <- lins[, c( "Strand", "Start", "Stop", "Nucleotide Accession", "Source", "Id", "Strain" - ) := NULL] - lins <- unique(lins) + ) := NULL] + lins <- unique(lins) + + # dup <- lins %>% group_by(Protein) %>% + # summarize(count = n()) %>% filter(count > 1) %>% + # pull(Protein) - # dup <- lins %>% group_by(Protein) %>% summarize(count = n()) %>% filter(count > 1) %>% - # pull(Protein) + merged <- merge(df, lins, by.x = acc_col, by.y = "Protein", all.x = TRUE) + return(merged) + }, error = function(e) { + print(paste("Error: ", e$message)) + }, warning = function(w) { + print(paste("Warning: ", w$message)) + }, finally = { + print("addLineages function execution completed.") + }) - merged <- merge(df, lins, by.x = acc_col, by.y = "Protein", all.x = TRUE) - return(merged) } @@ -68,7 +118,8 @@ add_lins <- function(df, acc_col = "AccNum", assembly_path, #' #' @author Samuel Chen, Janani Ravi #' -#' @description This function combines 'efetch_ipg()' and 'ipg2lin()' to map a set +#' @description This function combines 'efetch_ipg()' +#' and 'ipg2lin()' to map a set #' of protein accessions to their assembly (GCA_ID), tax ID, and lineage. #' #' @param accessions Character vector of protein accessions @@ -76,7 +127,8 @@ add_lins <- function(df, acc_col = "AccNum", assembly_path, #' This file can be generated using the "DownloadAssemblySummary()" function #' @param lineagelookup_path String of the path to the lineage lookup file #' (taxid to lineage mapping). 
This file can be generated using the -#' @param ipgout_path Path to write the results of the efetch run of the accessions +#' @param ipgout_path Path to write the results +#' of the efetch run of the accessions #' on the ipg database. If NULL, the file will not be written. Defaults to NULL #' @param plan #' @@ -87,27 +139,43 @@ add_lins <- function(df, acc_col = "AccNum", assembly_path, #' \dontrun{ #' acc2lin() #' } -acc2lin <- function(accessions, assembly_path, lineagelookup_path, ipgout_path = NULL, plan = "sequential") { - tmp_ipg <- F - if (is.null(ipgout_path)) { - tmp_ipg <- T - ipgout_path <- tempfile("ipg", fileext = ".txt") - } +acc2lin <- function(accessions, assembly_path, + lineagelookup_path, ipgout_path = NULL, + plan = "sequential") { + tmp_ipg <- F + if (is.null(ipgout_path)) { + tmp_ipg <- T + ipgout_path <- tempfile("ipg", fileext = ".txt") + } + + lins <- NULL + tryCatch({ + # Attempt to fetch IPG efetch_ipg(accessions, out_path = ipgout_path, plan) + # Attempt to process IPG to lineages lins <- ipg2lin(accessions, ipgout_path, assembly_path, lineagelookup_path) + }, error = function(e) { + print(paste("An error occurred: ", e$message)) + }, warning = function(w) { + print(paste("Warning: ", w$message)) + }, finally = { + print("acc2lin function execution completed.") + }) - if (tmp_ipg) { - unlink(tempdir(), recursive = T) - } - return(lins) + if (tmp_ipg) { + unlink(tempdir(), recursive = T) + } + return(lins) } + #' efetch_ipg #' #' @author Samuel Chen, Janani Ravi #' -#' @description Perform efetch on the ipg database and write the results to out_path +#' @description Perform efetch on the ipg database +#' and write the results to out_path #' #' @param accnums Character vector containing the accession numbers to query on #' the ipg database @@ -126,57 +194,84 @@ acc2lin <- function(accessions, assembly_path, lineagelookup_path, ipgout_path = #' efetch_ipg() #' } efetch_ipg <- function(accnums, out_path, plan = "sequential") { - if (length(accnums) > 0) { - partition <- function(in_data, groups) { - # \\TODO This function should be defined outside of efetch_ipg(). It can be non-exported/internal - # Partition data to limit number of queries per second for rentrez fetch: - # limit of 10/second w/ key - l <- length(in_data) - - partitioned <- list() - for (i in 1:groups) - { - partitioned[[i]] <- in_data[seq.int(i, l, groups)] - } - - return(partitioned) - } + # Argument validation + if (!is.character(accnums) || length(accnums) == 0) { + stop("Error: 'accnums' must be a non-empty character vector.") + } + + if (!is.character(out_path) || nchar(out_path) == 0) { + stop("Error: 'out_path' must be a non-empty string.") + } + + if (!is.function(plan)) { + stop("Error: 'plan' must be a valid plan function.") + } + if (length(accnums) > 0) { + partition <- function(in_data, groups) { + # \\TODO This function should be defined outside of efetch_ipg(). 
+ # It can be non-exported/internal + # Partition data to limit number of queries per second for rentrez fetch: + # limit of 10/second w/ key + l <- length(in_data) - plan(strategy = plan, .skip = T) - - - min_groups <- length(accnums) / 200 - groups <- min(max(min_groups, 15), length(accnums)) - partitioned_acc <- partition(accnums, groups) - sink(out_path) - - a <- future_map(1:length(partitioned_acc), function(x) { - # Avoid hitting the rate API limit - if (x %% 9 == 0) { - Sys.sleep(1) - } - cat( - entrez_fetch( - id = partitioned_acc[[x]], - db = "ipg", - rettype = "xml", - api_key = "YOUR_KEY_HERE" ## Can this be included in public package? - ) - ) - }) - sink(NULL) + partitioned <- list() + for (i in 1:groups){ + partitioned[[i]] <- in_data[seq.int(i, l, groups)] + } + + return(partitioned) } + tryCatch({ + # Set the future plan strategy + plan(strategy = plan, .skip = T) + + + min_groups <- length(accnums) / 200 + groups <- min(max(min_groups, 15), length(accnums)) + partitioned_acc <- partition(accnums, groups) + + # Open the sink to the output path + sink(out_path) + + a <- future_map(1:length(partitioned_acc), function(x) { + # Avoid hitting the rate API limit + if (x %% 9 == 0) { + Sys.sleep(1) + } + cat( + entrez_fetch( + id = partitioned_acc[[x]], + db = "ipg", + rettype = "xml", + api_key = "YOUR_KEY_HERE" ## Can this be included in public package? + ) + ) + }) + sink(NULL) + }, error = function(e) { + print(paste("An error occurred: ", e$message)) + }, warning = function(w) { + print(paste("Warning: ", w$message)) + }, finally = { + print("efetch_ipg function execution completed.") + }) + } } + + #' ipg2lin #' #' @author Samuel Chen, Janani Ravi #' -#' @description Takes the resulting file of an efetch run on the ipg database and +#' @description Takes the resulting file +#' of an efetch run on the ipg database and #' #' @param accessions Character vector of protein accessions -#' @param ipg_file Filepath to the file containing results of an efetch run on the -#' ipg database. The protein accession in 'accessions' should be contained in this +#' @param ipg_file Filepath to the file +#' containing results of an efetch run on the +#' ipg database. 
The protein accession in +#' 'accessions' should be contained in this #' file #' @param assembly_path String of the path to the assembly_summary path #' This file can be generated using the "DownloadAssemblySummary()" function @@ -195,16 +290,54 @@ efetch_ipg <- function(accnums, out_path, plan = "sequential") { #' } #' ipg2lin <- function(accessions, ipg_file, assembly_path, lineagelookup_path) { + # Argument validation for accessions + if (!is.character(accessions) || length(accessions) == 0) { + stop("Input 'accessions' must be a non-empty character vector.") + } + + # check for validate inputs + if (!is.character(ipg_file)) { + stop("Input 'ipg_file' must be a character string.") + } + # Ensure paths are character strings + if (!is.character(assembly_path) || !is.character(lineagelookup_path)) { + stop("Both 'assembly_path' and + 'lineagelookup_path' must be character strings.") + } + + # Ensure paths exist + if (!file.exists(assembly_path)) { + stop(paste("Assembly file not found at:", assembly_path)) + } + + if (!file.exists(lineagelookup_path)) { + stop(paste("Lineage lookup file not found at:", lineagelookup_path)) + } + + try({ + # Attempt to read the IPG file ipg_dt <- fread(ipg_file, sep = "\t", fill = T) + # Filter the IPG data table to only include the accessions ipg_dt <- ipg_dt[Protein %in% accessions] + # Rename the 'Assembly' column to 'GCA_ID' ipg_dt <- setnames(ipg_dt, "Assembly", "GCA_ID") + # Convert the IPG data table to a lineage data table lins <- GCA2Lins(prot_data = ipg_dt, assembly_path, lineagelookup_path) + + # Filter out rows with missing lineage information lins <- lins[!is.na(Lineage)] %>% unique() return(lins) + }, error = function(e) { + print(paste("An error occurred: ", e$message)) + }, warning = function(w) { + print(paste("Warning: ", w$message)) + }, finally = { + print("ipg2lin function execution completed.") + }) } diff --git a/man/acc2lin.Rd b/man/acc2lin.Rd index 6255b290..d3f2468b 100644 --- a/man/acc2lin.Rd +++ b/man/acc2lin.Rd @@ -38,7 +38,8 @@ on the ipg database. If NULL, the file will not be written. Defaults to NULL} Describe return, in detail } \description{ -This function combines 'efetch_ipg()' and 'ipg2lin()' to map a set +This function combines 'efetch_ipg()' +and 'ipg2lin()' to map a set of protein accessions to their assembly (GCA_ID), tax ID, and lineage. 
Function to map protein accession numbers to lineage diff --git a/man/efetch_ipg.Rd b/man/efetch_ipg.Rd index ec5b6bcb..1fbb9d92 100644 --- a/man/efetch_ipg.Rd +++ b/man/efetch_ipg.Rd @@ -23,7 +23,8 @@ the ipg database} Describe return, in detail } \description{ -Perform efetch on the ipg database and write the results to out_path +Perform efetch on the ipg database +and write the results to out_path Perform efetch on the ipg database and write the results to out_path } diff --git a/man/ipg2lin.Rd b/man/ipg2lin.Rd index 3a14eada..453668b0 100644 --- a/man/ipg2lin.Rd +++ b/man/ipg2lin.Rd @@ -38,7 +38,8 @@ This file can be generated using the "DownloadAssemblySummary()" function} Describe return, in detail } \description{ -Takes the resulting file of an efetch run on the ipg database and +Takes the resulting file +of an efetch run on the ipg database and Takes the resulting file of an efetch run on the ipg database and append lineage, and taxid columns diff --git a/man/sink.reset.Rd b/man/sink.reset.Rd index a31b841d..64087c49 100644 --- a/man/sink.reset.Rd +++ b/man/sink.reset.Rd @@ -8,6 +8,7 @@ sink.reset() } \value{ No return, but run to close all outstanding \code{sink()}s +and handles any errors or warnings that occur during the process. } \description{ Sink Reset From aff97e433e5a0c367dfbb8f284ea200e1876a5da Mon Sep 17 00:00:00 2001 From: teddyCodex <15376476+teddyCodex@users.noreply.github.com> Date: Sat, 5 Oct 2024 16:16:51 +0100 Subject: [PATCH 05/41] Update CONTRIBUTING.md Added a couple of clearer steps to the pull request process. --- .github/CONTRIBUTING.md | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index 180ecf6c..5f240176 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -18,8 +18,17 @@ See our guide on [how to create a great issue](https://code-review.tidyverse.org ### Pull request process -* Fork the package and clone onto your computer. If you haven't done this before, we recommend using `usethis::create_from_github("JRaviLab/MolEvolvR", fork = TRUE)`. - +* Fork the package and clone onto your computer. If you haven't done this before, we recommend using `usethis`. + +* Install and load the `usethis` package with: + ``` + install.packages("usethis") + library(usethis) + ``` +* Clone and fork the MolEvolvR package using: + ``` + usethis::create_from_github("JRaviLab/MolEvolvR", fork = TRUE) + ``` * Install all development dependencies with `devtools::install_dev_deps()`, and then make sure the package passes R CMD check by running `devtools::check()`. If R CMD check doesn't pass cleanly, it's a good idea to ask for help before continuing. * Create a Git branch for your pull request (PR). We recommend using `usethis::pr_init("brief-description-of-change")`. 
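The CONTRIBUTING.md changes above describe a `usethis`/`devtools`-based contributor workflow. An illustrative sketch of that full loop, assuming both packages are installed and GitHub credentials are already configured, might look like this:

```
# one-time setup: fork and clone the package
usethis::create_from_github("JRaviLab/MolEvolvR", fork = TRUE)

# start a branch for the change
usethis::pr_init("brief-description-of-change")

# install development dependencies and confirm R CMD check passes
devtools::install_dev_deps()
devtools::check()

# optionally lint the files you touched, e.g.
lintr::lint("R/acc2lin.R")

# commit in git, then open the pull request from R
usethis::pr_push()
```

The repository, branch name, and example file path are the ones used in the guide and patches above; the sequence simply strings together the steps the guide lists.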
From 3a0376fc7024f6069580ce2059c27510bffb16d0 Mon Sep 17 00:00:00 2001 From: teddyCodex <15376476+teddyCodex@users.noreply.github.com> Date: Sat, 5 Oct 2024 16:29:11 +0100 Subject: [PATCH 06/41] Update CONTRIBUTING.md --- .github/CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index 5f240176..9465c683 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -23,7 +23,7 @@ See our guide on [how to create a great issue](https://code-review.tidyverse.org * Install and load the `usethis` package with: ``` install.packages("usethis") - library(usethis) + library("usethis") ``` * Clone and fork the MolEvolvR package using: ``` From 67a6d0eaeded334e6869621a70c781cd917dd3bc Mon Sep 17 00:00:00 2001 From: teddyCodex <15376476+teddyCodex@users.noreply.github.com> Date: Sun, 6 Oct 2024 09:30:55 +0100 Subject: [PATCH 07/41] Update CONTRIBUTING.md to include explicit installation steps and improved clarity for development process - Added explicit instructions for installing and loading the `usethis`, `devtools`, and `lintr` packages. - Overall improvements to make the documentation more user-friendly, especially for new contributors. --- .github/CONTRIBUTING.md | 29 +++++++++++++++++++++++++---- 1 file changed, 25 insertions(+), 4 deletions(-) diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index 9465c683..5db3f961 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -23,15 +23,29 @@ See our guide on [how to create a great issue](https://code-review.tidyverse.org * Install and load the `usethis` package with: ``` install.packages("usethis") + library("usethis") ``` * Clone and fork the MolEvolvR package using: ``` usethis::create_from_github("JRaviLab/MolEvolvR", fork = TRUE) ``` -* Install all development dependencies with `devtools::install_dev_deps()`, and then make sure the package passes R CMD check by running `devtools::check()`. - If R CMD check doesn't pass cleanly, it's a good idea to ask for help before continuing. -* Create a Git branch for your pull request (PR). We recommend using `usethis::pr_init("brief-description-of-change")`. +* Install all development dependencies and then make sure the package passes R CMD check using `devtools`: + ``` + install.packages("devtools") + + library("devtools") + + devtools::install_dev_deps() + + devtools::check() + ``` + _If R CMD check doesn't pass cleanly, it's a good idea to ask for help before continuing._ + +* Create a Git branch for your pull request (PR). We recommend using + ``` + usethis::pr_init("brief-description-of-change") + ``` * Make your changes, commit to git, and then create a PR by running `usethis::pr_push()`, and following the prompts in your browser. The title of your PR should briefly describe the change. @@ -44,7 +58,14 @@ See our guide on [how to create a great issue](https://code-review.tidyverse.org * New code should follow the tidyverse [style guide](https://style.tidyverse.org). You can use the [styler](https://CRAN.R-project.org/package=styler) package to apply these styles, but please don't restyle code that has nothing to do with your PR. 
-* Lint Your Code: Ensure your code adheres to our style guidelines by using [lintr](https://lintr.r-lib.org/): `lintr::lint("path/to/your/file.R")` +* Lint Your Code: Ensure your code adheres to our style guidelines by using [lintr](https://lintr.r-lib.org/): + ``` + install.packages("lintr") + + library("lintr") + + lintr::lint("path/to/your/file.R") + ``` * We use [roxygen2](https://cran.r-project.org/package=roxygen2), with [Markdown syntax](https://cran.r-project.org/web/packages/roxygen2/vignettes/rd-formatting.html), for documentation. From 730842f2e7162f87cb26a7493ac7fe42a16f2cb2 Mon Sep 17 00:00:00 2001 From: valentina buoro Date: Mon, 7 Oct 2024 02:26:31 +0100 Subject: [PATCH 08/41] perf:updated functions to pass R-CMD Check: no visible binding for global variable for underlisted functions(lineage.neighbors.plot, lineage_sunburst, make_accnums_unique,make_df_iprscan_domain --- NAMESPACE | 1 + R/cleanup.R | 8 ++++---- R/fa2domain.R | 14 +++++++------- R/ipr2viz.R | 5 +++-- R/plotting.R | 14 +++++++------- 5 files changed, 22 insertions(+), 20 deletions(-) diff --git a/NAMESPACE b/NAMESPACE index 16cf0813..2c5597c6 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -229,6 +229,7 @@ importFrom(readr,write_file) importFrom(readr,write_lines) importFrom(readr,write_tsv) importFrom(rentrez,entrez_fetch) +importFrom(rlang,.data) importFrom(rlang,as_string) importFrom(rlang,sym) importFrom(sendmailR,mime_part) diff --git a/R/cleanup.R b/R/cleanup.R index 3a708415..1d9a1395 100755 --- a/R/cleanup.R +++ b/R/cleanup.R @@ -88,12 +88,12 @@ make_accnums_unique <- function(accnums) { # for the index of occurence for each accession number df_accnums <- tibble::tibble("accnum" = accnums) df_accnums <- df_accnums |> - dplyr::group_by(accnum) |> + dplyr::group_by(.data$accnum) |> dplyr::mutate(suffix = dplyr::row_number()) |> dplyr::ungroup() |> - dplyr::mutate(accnum_adjusted = paste0(accnum, "_", suffix)) |> - dplyr::arrange(accnum_adjusted) - accnums_adjusted <- df_accnums |> dplyr::pull(accnum_adjusted) + dplyr::mutate(accnum_adjusted = paste0(.data$accnum, "_", .data$suffix)) |> + dplyr::arrange(.data$accnum_adjusted) + accnums_adjusted <- df_accnums |> dplyr::pull(.data$accnum_adjusted) return(accnums_adjusted) } diff --git a/R/fa2domain.R b/R/fa2domain.R index 672d0856..ab1369e7 100644 --- a/R/fa2domain.R +++ b/R/fa2domain.R @@ -138,10 +138,10 @@ make_df_iprscan_domains <- function( # filter for the accnum of interest (note: it's possible the accession # number is not in the table [i.e., it had no domains]) df_iprscan_accnum <- df_iprscan |> - dplyr::filter(Analysis %in% analysis) |> - dplyr::filter(AccNum == accnum) |> + dplyr::filter(.data$Analysis %in% analysis) |> + dplyr::filter(.data$AccNum == accnum) |> dplyr::select(dplyr::all_of(c("AccNum", "DB.ID", "StartLoc", "StopLoc"))) |> - dplyr::arrange(StartLoc) + dplyr::arrange(.data$StartLoc) # handle the case of no records after filtering by "Analysis"; return the tibble # with 0 rows quickly if (nrow(df_iprscan_accnum) < 1) { @@ -153,9 +153,9 @@ make_df_iprscan_domains <- function( dplyr::rowwise() |> dplyr::mutate( seq_domain = XVector::subseq( - fasta[[grep(pattern = AccNum, x = names(fasta), fixed = TRUE)]], - start = StartLoc, - end = StopLoc + fasta[[grep(pattern = .data$AccNum, x = names(fasta), fixed = TRUE)]], + start = .data$StartLoc, + end = .data$StopLoc ) |> as.character() ) @@ -166,7 +166,7 @@ make_df_iprscan_domains <- function( id_domain = stringr::str_glue("{AccNum}-{DB.ID}-{StartLoc}_{StopLoc}") ) |> dplyr::ungroup() |> - 
dplyr::relocate(id_domain, .before = 1) + dplyr::relocate(.data$id_domain, .before = 1) return(df_iprscan_domains) } diff --git a/R/ipr2viz.R b/R/ipr2viz.R index bf3650f7..b0db06f9 100644 --- a/R/ipr2viz.R +++ b/R/ipr2viz.R @@ -53,6 +53,7 @@ theme_genes2 <- function() { #' @importFrom shiny showNotification #' @importFrom stats na.omit #' @importFrom rlang sym +#' @importFrom rlang .data #' #' @return #' @export @@ -295,7 +296,7 @@ ipr2viz_web <- function(infile_ipr, ## @SAM, colnames, merges, everything neeeds to be done now based on the ## combined lookup table from "common_data" lookup_tbl_path <- "/data/research/jravilab/common_data/cln_lookup_tbl.tsv" - lookup_tbl <- read_tsv(lookup_tbl_path, col_names = T, col_types = lookup_table_cols) + lookup_tbl <- read_tsv(lookup_tbl_path, col_names = T, col_types = .data$lookup_table_cols) ## Read IPR file and subset by Accessions ipr_out <- read_tsv(infile_ipr, col_names = T) @@ -303,7 +304,7 @@ ipr2viz_web <- function(infile_ipr, ## Need to fix eventually based on 'real' gene orientation! ipr_out$Strand <- rep("forward", nrow(ipr_out)) - ipr_out <- ipr_out %>% arrange(AccNum, StartLoc, StopLoc) + ipr_out <- ipr_out %>% arrange(.data$AccNum, .data$StartLoc, .data$StopLoc) ipr_out_sub <- filter( ipr_out, grepl(pattern = analysis, x = Analysis) diff --git a/R/plotting.R b/R/plotting.R index 7abd06d4..ef803f10 100644 --- a/R/plotting.R +++ b/R/plotting.R @@ -521,8 +521,8 @@ lineage.neighbors.plot <- function(query_data = "prot", query = "pspa", gather(key = TopNeighbors.DA, value = count, 19:ncol(query_data)) %>% select("Lineage", "TopNeighbors.DA", "count") %>% # "DomArch.norep","GenContext.norep", group_by(TopNeighbors.DA, Lineage) %>% - summarise(lincount = sum(count), bin = as.numeric(as.logical(lincount))) %>% - arrange(desc(lincount)) %>% + summarise(lincount =sum(count), bin = as.numeric(as.logical(.data$lincount))) %>% + arrange(desc(.data$lincount)) %>% within(TopNeighbors.DA <- factor(TopNeighbors.DA, levels = rev(names(sort(table(TopNeighbors.DA), decreasing = TRUE @@ -538,9 +538,9 @@ lineage.neighbors.plot <- function(query_data = "prot", query = "pspa", geom_tile( data = subset( query.ggplot, - !is.na(lincount) + !is.na(.data$lincount) ), # bin - aes(fill = lincount), # bin + aes(fill = .data$lincount), # bin colour = "coral3", size = 0.3 ) + # , width=0.7, height=0.7), scale_fill_gradient(low = "white", high = "darkred") + @@ -1223,13 +1223,13 @@ lineage_sunburst <- function(prot, lineage_column = "Lineage", group_by_at(levels_vec) %>% summarise(size = n()) protLevels <- protLevels %>% arrange() - tree <- d3_nest(protLevels, value_cols = "size") + tree <- .data$d3_nest(protLevels, value_cols = "size") # Plot sunburst if (type == "sunburst") { - result <- sunburst(tree, legend = list(w = 225, h = 15, r = 5, s = 5), colors = cpcols, legendOrder = legendOrder, width = "100%", height = "100%") + result <- sunburst(tree, legend = list(w = 225, h = 15, r = 5, s = 5), colors = .data$cpcols, legendOrder = legendOrder, width = "100%", height = "100%") } else if (type == "sund2b") { - result <- sund2b(tree) + result <- .data$sund2b(tree) } if (showLegend) { From d9fa04bc729586ab336275083d67fb75420ac138 Mon Sep 17 00:00:00 2001 From: Awa Synthia Date: Mon, 7 Oct 2024 07:42:08 +0300 Subject: [PATCH 09/41] use one documentation file Signed-off-by: Awa Synthia --- R/summarize.R | 16 ++- man/count_bycol.Rd | 38 ------ man/elements2words.Rd | 40 ------- man/filter_by_doms.Rd | 44 ------- man/filter_freq.Rd | 28 ----- man/summ.DA.Rd | 31 ----- 
man/summ.DA.byLin.Rd | 27 ----- man/summ.GC.Rd | 32 ----- man/summ.GC.byDALin.Rd | 33 ------ man/summ.GC.byLin.Rd | 22 ---- man/summarize.Rd | 260 +++++++++++++++++++++++++++++++++++++++++ man/summarize_bylin.Rd | 36 ------ man/total_counts.Rd | 58 --------- man/words2wc.Rd | 32 ----- 14 files changed, 274 insertions(+), 423 deletions(-) delete mode 100644 man/count_bycol.Rd delete mode 100644 man/elements2words.Rd delete mode 100644 man/filter_by_doms.Rd delete mode 100644 man/filter_freq.Rd delete mode 100644 man/summ.DA.Rd delete mode 100644 man/summ.DA.byLin.Rd delete mode 100644 man/summ.GC.Rd delete mode 100644 man/summ.GC.byDALin.Rd delete mode 100644 man/summ.GC.byLin.Rd create mode 100644 man/summarize.Rd delete mode 100644 man/summarize_bylin.Rd delete mode 100644 man/total_counts.Rd delete mode 100644 man/words2wc.Rd diff --git a/R/summarize.R b/R/summarize.R index e03ca463..0580c15d 100644 --- a/R/summarize.R +++ b/R/summarize.R @@ -29,6 +29,7 @@ #' #' @return Filtered data frame #' @note There is no need to make the domains 'regex safe', that will be handled by this function +#' @name summarize #' @export #' #' @examples @@ -110,7 +111,7 @@ filter_by_doms <- function(prot, column = "DomArch", doms_keep = c(), doms_remov #' The tibble is filtered to only include elements that have a frequency #' greater than or equal to `min.freq` and does not include elements with `NA` #' values or those starting with a hyphen ("-"). -#' +#' @name summarize #' @export #' #' @examples @@ -155,6 +156,7 @@ count_bycol <- function(prot = prot, column = "DomArch", min.freq = 1) { #' @return A single string where elements are delimited by spaces. The function #' performs necessary substitutions based on the `conversion_type` and cleans up #' extraneous characters like newlines, tabs, and multiple spaces. +#' @name summarize #' #' @examples #' \dontrun{ @@ -212,6 +214,8 @@ elements2words <- function(prot, column = "DomArch", conversion_type = "da2doms" #' (domains or domain architectures).} #' \item{`freq`}{A column containing the frequency counts for each word.} #' } +#' +#' @name summarize #' #' @examples #' \dontrun{ @@ -259,6 +263,7 @@ words2wc <- function(string) { #' #' @return A tibble with the same structure as `x`, but filtered to include #' only rows where the frequency is greater than or equal to `min.freq`. +#' @name summarize #' #' @export #' @@ -290,6 +295,7 @@ filter_freq <- function(x, min.freq) { #' @return A tibble summarizing the counts of occurrences of elements in #' the `column`, grouped by the `by` column. The result includes the number #' of occurrences (`count`) and is arranged in descending order of count. +#' @name summarize #' @export #' #' @examples @@ -335,6 +341,7 @@ summarize_bylin <- function(prot = "prot", column = "DomArch", by = "Lineage", #' columns: `DomArch`, `Lineage`, and `count`, which indicates the frequency #' of each domain architecture for each lineage. The results are arranged in #' descending order of `count`. +#' @name summarize #' #' @export #' @@ -369,6 +376,7 @@ summ.DA.byLin <- function(x) { #' - `totallin`: The total number of unique lineages in which each `DomArch` #' appears. #' The results are arranged in descending order of `totallin` and `totalcount`. +#' @name summarize #' @export #' #' @examples @@ -401,6 +409,7 @@ summ.DA <- function(x) { #' `GenContext`, `DomArch`, and `Lineage`. #' #' The results are arranged in descending order of `count`. 
+#' @name summarize #' @export #' #' @examples @@ -421,11 +430,12 @@ summ.GC.byDALin <- function(x) { #' summ.GC.byLin #' -#' @param x +#' @param x A dataframe or tibble containing the data. #' #' @importFrom dplyr arrange desc filter group_by n summarise #' #' @return Describe return, in detail +#' @name summarize #' @export #' #' @examples @@ -459,6 +469,7 @@ summ.GC.byLin <- function(x) { #' `GenContext` and `Lineage`. #' #' The results are arranged in descending order of `count`. +#' @name summarize #' @export #' #' @examples @@ -512,6 +523,7 @@ summ.GC <- function(x) { #' - `IndividualCountPercent`: The percentage of each `totalcount` relative to #' the overall count. #' - `CumulativePercent`: The cumulative percentage of total counts. +#' @name summarize #' @export #' #' @note Please refer to the source code if you have alternate file formats and/or diff --git a/man/count_bycol.Rd b/man/count_bycol.Rd deleted file mode 100644 index 946a7ea2..00000000 --- a/man/count_bycol.Rd +++ /dev/null @@ -1,38 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/summarize.R -\name{count_bycol} -\alias{count_bycol} -\title{Count Bycol} -\usage{ -count_bycol(prot = prot, column = "DomArch", min.freq = 1) -} -\arguments{ -\item{prot}{A data frame containing the dataset to analyze, typically with -multiple columns including the one specified by the \code{column} parameter.} - -\item{column}{A character string specifying the name of the column to analyze. -The default is "DomArch".} - -\item{min.freq}{An integer specifying the minimum frequency an element must -have to be included in the output. Default is 1.} -} -\value{ -A tibble with two columns: -\describe{ -\item{\code{column}}{The unique elements from the specified column -(e.g., "DomArch").} -\item{\code{freq}}{The frequency of each element, i.e., the number of times -each element appears in the specified column.} -} -The tibble is filtered to only include elements that have a frequency -greater than or equal to \code{min.freq} and does not include elements with \code{NA} -values or those starting with a hyphen ("-"). -} -\description{ -Count Bycol -} -\examples{ -\dontrun{ -count_bycol(prot = my_data, column = "DomArch", min.freq = 10) -} -} diff --git a/man/elements2words.Rd b/man/elements2words.Rd deleted file mode 100644 index bda447db..00000000 --- a/man/elements2words.Rd +++ /dev/null @@ -1,40 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/summarize.R -\name{elements2words} -\alias{elements2words} -\title{Elements 2 Words} -\usage{ -elements2words(prot, column = "DomArch", conversion_type = "da2doms") -} -\arguments{ -\item{prot}{A dataframe containing the dataset to analyze. The specified -\code{column} contains the string elements to be processed.} - -\item{column}{A character string specifying the name of the column to analyze. -Default is "DomArch".} - -\item{conversion_type}{A character string specifying the type of conversion. -Two options are available: -\describe{ -\item{\code{da2doms}}{Convert domain architectures into individual domains by -replacing \code{+} symbols with spaces.} -\item{\code{gc2da}}{Convert genomic context into domain architectures by -replacing directional symbols (\verb{<-}, \verb{->}, and \code{|}) with spaces.} -}} -} -\value{ -A single string where elements are delimited by spaces. 
The function -performs necessary substitutions based on the \code{conversion_type} and cleans up -extraneous characters like newlines, tabs, and multiple spaces. -} -\description{ -Break string ELEMENTS into WORDS for domain architecture (DA) and genomic -context (GC) -} -\examples{ -\dontrun{ -tibble::tibble(DomArch = c("aaa+bbb", -"a+b", "b+c", "b-c")) |> elements2words() -} - -} diff --git a/man/filter_by_doms.Rd b/man/filter_by_doms.Rd deleted file mode 100644 index cfe255ca..00000000 --- a/man/filter_by_doms.Rd +++ /dev/null @@ -1,44 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/summarize.R -\name{filter_by_doms} -\alias{filter_by_doms} -\title{Filter by Domains} -\usage{ -filter_by_doms( - prot, - column = "DomArch", - doms_keep = c(), - doms_remove = c(), - ignore.case = FALSE -) -} -\arguments{ -\item{prot}{Dataframe to filter} - -\item{column}{Column to search for domains in (DomArch column)} - -\item{doms_keep}{Vector of domains that must be identified within column in order for -observation to be kept} - -\item{doms_remove}{Vector of domains that, if found within an observation, will be removed} - -\item{ignore.case}{Should the matching be non case sensitive} -} -\value{ -Filtered data frame -} -\description{ -filter_by_doms filters a data frame by identifying exact domain matches -and either keeping or removing rows with the identified domain -} -\note{ -There is no need to make the domains 'regex safe', that will be handled by this function -} -\examples{ -\dontrun{ -filter_by_doms() -} -} -\author{ -Samuel Chen, Janani Ravi -} diff --git a/man/filter_freq.Rd b/man/filter_freq.Rd deleted file mode 100644 index 9dfba73b..00000000 --- a/man/filter_freq.Rd +++ /dev/null @@ -1,28 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/summarize.R -\name{filter_freq} -\alias{filter_freq} -\title{Filter Frequency} -\usage{ -filter_freq(x, min.freq) -} -\arguments{ -\item{x}{A tibble (tbl_df) containing at least two columns: one for -elements (e.g., \code{words}) and one for their frequency (e.g., \code{freq}).} - -\item{min.freq}{A numeric value specifying the minimum frequency threshold. -Only elements with frequencies greater than or equal to this value will be -retained.} -} -\value{ -A tibble with the same structure as \code{x}, but filtered to include -only rows where the frequency is greater than or equal to \code{min.freq}. -} -\description{ -Filter Frequency -} -\examples{ -\dontrun{ -filter_freq() -} -} diff --git a/man/summ.DA.Rd b/man/summ.DA.Rd deleted file mode 100644 index 01d15b3c..00000000 --- a/man/summ.DA.Rd +++ /dev/null @@ -1,31 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/summarize.R -\name{summ.DA} -\alias{summ.DA} -\title{summ.DA} -\usage{ -summ.DA(x) -} -\arguments{ -\item{x}{A dataframe or tibble containing the data. It must have a column -named \code{DomArch} and a count column, such as \code{count}, which represents the -occurrences of each architecture in various lineages.} -} -\value{ -A tibble summarizing each unique \code{DomArch}, along with the following -columns: -\itemize{ -\item \code{totalcount}: The total occurrences of each \code{DomArch} across all lineages. -\item \code{totallin}: The total number of unique lineages in which each \code{DomArch} -appears. -The results are arranged in descending order of \code{totallin} and \code{totalcount}. 
-} -} -\description{ -Function to retrieve counts of how many lineages a DomArch appears in -} -\examples{ -\dontrun{ -summ.DA() -} -} diff --git a/man/summ.DA.byLin.Rd b/man/summ.DA.byLin.Rd deleted file mode 100644 index d88e5d37..00000000 --- a/man/summ.DA.byLin.Rd +++ /dev/null @@ -1,27 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/summarize.R -\name{summ.DA.byLin} -\alias{summ.DA.byLin} -\title{summ.DA.byLin} -\usage{ -summ.DA.byLin(x) -} -\arguments{ -\item{x}{A dataframe or tibble containing the data. It must have columns -named \code{DomArch} and \code{Lineage}.} -} -\value{ -A tibble summarizing the counts of unique domain architectures -(\code{DomArch}) per lineage (\code{Lineage}). The resulting table contains three -columns: \code{DomArch}, \code{Lineage}, and \code{count}, which indicates the frequency -of each domain architecture for each lineage. The results are arranged in -descending order of \code{count}. -} -\description{ -Function to summarize and retrieve counts by Domains & Domains+Lineage -} -\examples{ -\dontrun{ -summ.DA.byLin() -} -} diff --git a/man/summ.GC.Rd b/man/summ.GC.Rd deleted file mode 100644 index 2ec4d651..00000000 --- a/man/summ.GC.Rd +++ /dev/null @@ -1,32 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/summarize.R -\name{summ.GC} -\alias{summ.GC} -\title{summ.GC} -\usage{ -summ.GC(x) -} -\arguments{ -\item{x}{A dataframe or tibble containing the data. It must have columns -named \code{GenContext}, \code{DomArch}, and \code{Lineage}.} -} -\value{ -A tibble summarizing each unique combination of \code{GenContext} and -\code{Lineage}, along with the following columns: -\itemize{ -\item \code{GenContext}: The genomic context for each entry. -\item \code{Lineage}: The lineage associated with each entry. -\item \code{count}: The total number of occurrences for each combination of -\code{GenContext} and \code{Lineage}. -} - -The results are arranged in descending order of \code{count}. -} -\description{ -summ.GC -} -\examples{ -\dontrun{ -summ.GC() -} -} diff --git a/man/summ.GC.byDALin.Rd b/man/summ.GC.byDALin.Rd deleted file mode 100644 index 7fc8d443..00000000 --- a/man/summ.GC.byDALin.Rd +++ /dev/null @@ -1,33 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/summarize.R -\name{summ.GC.byDALin} -\alias{summ.GC.byDALin} -\title{summ.GC.byDALin} -\usage{ -summ.GC.byDALin(x) -} -\arguments{ -\item{x}{A dataframe or tibble containing the data. It must have columns -named \code{GenContext}, \code{DomArch}, and \code{Lineage}.} -} -\value{ -A tibble summarizing each unique combination of \code{GenContext}, -\code{DomArch}, and \code{Lineage}, along with the following columns: -\itemize{ -\item \code{GenContext}: The genomic context for each entry. -\item \code{DomArch}: The domain architecture for each entry. -\item \code{Lineage}: The lineage associated with each entry. -\item \code{count}: The total number of occurrences for each combination of -\code{GenContext}, \code{DomArch}, and \code{Lineage}. -} - -The results are arranged in descending order of \code{count}. 
-} -\description{ -summ.GC.byDALin -} -\examples{ -\dontrun{ -summ.GC.byDALin -} -} diff --git a/man/summ.GC.byLin.Rd b/man/summ.GC.byLin.Rd deleted file mode 100644 index df2a8fb8..00000000 --- a/man/summ.GC.byLin.Rd +++ /dev/null @@ -1,22 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/summarize.R -\name{summ.GC.byLin} -\alias{summ.GC.byLin} -\title{summ.GC.byLin} -\usage{ -summ.GC.byLin(x) -} -\arguments{ -\item{x}{} -} -\value{ -Describe return, in detail -} -\description{ -summ.GC.byLin -} -\examples{ -\dontrun{ -summ.GC.byLin() -} -} diff --git a/man/summarize.Rd b/man/summarize.Rd new file mode 100644 index 00000000..f149f686 --- /dev/null +++ b/man/summarize.Rd @@ -0,0 +1,260 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/summarize.R +\name{summarize} +\alias{summarize} +\alias{filter_by_doms} +\alias{count_bycol} +\alias{elements2words} +\alias{words2wc} +\alias{filter_freq} +\alias{summarize_bylin} +\alias{summ.DA.byLin} +\alias{summ.DA} +\alias{summ.GC.byDALin} +\alias{summ.GC.byLin} +\alias{summ.GC} +\alias{total_counts} +\title{Filter by Domains} +\usage{ +filter_by_doms( + prot, + column = "DomArch", + doms_keep = c(), + doms_remove = c(), + ignore.case = FALSE +) + +count_bycol(prot = prot, column = "DomArch", min.freq = 1) + +elements2words(prot, column = "DomArch", conversion_type = "da2doms") + +words2wc(string) + +filter_freq(x, min.freq) + +summarize_bylin(prot = "prot", column = "DomArch", by = "Lineage", query) + +summ.DA.byLin(x) + +summ.DA(x) + +summ.GC.byDALin(x) + +summ.GC.byLin(x) + +summ.GC(x) + +total_counts( + prot, + column = "DomArch", + lineage_col = "Lineage", + cutoff = 90, + RowsCutoff = FALSE, + digits = 2 +) +} +\arguments{ +\item{prot}{A data frame that must contain columns: +\itemize{\item Either 'GenContext' or 'DomArch.norep' \item count}} + +\item{column}{Character. The column to summarize, default is "DomArch".} + +\item{doms_keep}{Vector of domains that must be identified within column in order for +observation to be kept} + +\item{doms_remove}{Vector of domains that, if found within an observation, will be removed} + +\item{ignore.case}{Should the matching be non case sensitive} + +\item{min.freq}{A numeric value specifying the minimum frequency threshold. +Only elements with frequencies greater than or equal to this value will be +retained.} + +\item{conversion_type}{A character string specifying the type of conversion. +Two options are available: +\describe{ +\item{\code{da2doms}}{Convert domain architectures into individual domains by +replacing \code{+} symbols with spaces.} +\item{\code{gc2da}}{Convert genomic context into domain architectures by +replacing directional symbols (\verb{<-}, \verb{->}, and \code{|}) with spaces.} +}} + +\item{string}{A character string containing the elements (words) to count. +This would typically be a space-delimited string representing domain +architectures or genomic contexts.} + +\item{x}{A dataframe or tibble containing the data. It must have columns +named \code{GenContext}, \code{DomArch}, and \code{Lineage}.} + +\item{by}{A string representing the grouping column (e.g., \code{Lineage}). +Default is "Lineage".} + +\item{query}{A string specifying the query pattern for filtering the target +column. Use "all" to skip filtering and include all rows.} + +\item{lineage_col}{Character. The name of the lineage column, default is +"Lineage".} + +\item{cutoff}{Numeric. Cutoff for total count. 
Counts below this cutoff value +will not be shown. Default is 0.} + +\item{RowsCutoff}{Logical. If TRUE, filters based on cumulative percentage +cutoff. Default is FALSE.} + +\item{digits}{Numeric. Number of decimal places for percentage columns. +Default is 2.} +} +\value{ +Filtered data frame + +A tibble with two columns: +\describe{ +\item{\code{column}}{The unique elements from the specified column +(e.g., "DomArch").} +\item{\code{freq}}{The frequency of each element, i.e., the number of times +each element appears in the specified column.} +} +The tibble is filtered to only include elements that have a frequency +greater than or equal to \code{min.freq} and does not include elements with \code{NA} +values or those starting with a hyphen ("-"). + +A single string where elements are delimited by spaces. The function +performs necessary substitutions based on the \code{conversion_type} and cleans up +extraneous characters like newlines, tabs, and multiple spaces. + +A tibble (tbl_df) with two columns: +\describe{ +\item{\code{words}}{A column containing the individual words +(domains or domain architectures).} +\item{\code{freq}}{A column containing the frequency counts for each word.} +} + +A tibble with the same structure as \code{x}, but filtered to include +only rows where the frequency is greater than or equal to \code{min.freq}. + +A tibble summarizing the counts of occurrences of elements in +the \code{column}, grouped by the \code{by} column. The result includes the number +of occurrences (\code{count}) and is arranged in descending order of count. + +A tibble summarizing the counts of unique domain architectures +(\code{DomArch}) per lineage (\code{Lineage}). The resulting table contains three +columns: \code{DomArch}, \code{Lineage}, and \code{count}, which indicates the frequency +of each domain architecture for each lineage. The results are arranged in +descending order of \code{count}. + +A tibble summarizing each unique \code{DomArch}, along with the following +columns: +\itemize{ +\item \code{totalcount}: The total occurrences of each \code{DomArch} across all lineages. +\item \code{totallin}: The total number of unique lineages in which each \code{DomArch} +appears. +The results are arranged in descending order of \code{totallin} and \code{totalcount}. +} + +A tibble summarizing each unique combination of \code{GenContext}, +\code{DomArch}, and \code{Lineage}, along with the following columns: +\itemize{ +\item \code{GenContext}: The genomic context for each entry. +\item \code{DomArch}: The domain architecture for each entry. +\item \code{Lineage}: The lineage associated with each entry. +\item \code{count}: The total number of occurrences for each combination of +\code{GenContext}, \code{DomArch}, and \code{Lineage}. +} + +The results are arranged in descending order of \code{count}. + +Describe return, in detail + +A tibble summarizing each unique combination of \code{GenContext} and +\code{Lineage}, along with the following columns: +\itemize{ +\item \code{GenContext}: The genomic context for each entry. +\item \code{Lineage}: The lineage associated with each entry. +\item \code{count}: The total number of occurrences for each combination of +\code{GenContext} and \code{Lineage}. +} + +The results are arranged in descending order of \code{count}. + +A data frame with the following columns: +\itemize{ +\item \code{{{ column }}}: Unique values from the specified column. 
+\item \code{totalcount}: The total count of occurrences for each unique value in +the specified column. +\item \code{IndividualCountPercent}: The percentage of each \code{totalcount} relative to +the overall count. +\item \code{CumulativePercent}: The cumulative percentage of total counts. +} +} +\description{ +filter_by_doms filters a data frame by identifying exact domain matches +and either keeping or removing rows with the identified domain + +Break string ELEMENTS into WORDS for domain architecture (DA) and genomic +context (GC) + +Get word counts (wc) \link{DOMAINS (DA) or DOMAIN ARCHITECTURES (GC)} + +Function to summarize and retrieve counts by Domains & Domains+Lineage + +Function to retrieve counts of how many lineages a DomArch appears in + +Creates a data frame with a totalcount column + +This function is designed to sum the counts column by either Genomic Context or Domain Architecture and creates a totalcount column from those sums. +} +\note{ +There is no need to make the domains 'regex safe', that will be handled by this function + +Please refer to the source code if you have alternate file formats and/or +column names. +} +\examples{ +\dontrun{ +filter_by_doms() +} +\dontrun{ +count_bycol(prot = my_data, column = "DomArch", min.freq = 10) +} +\dontrun{ +tibble::tibble(DomArch = c("aaa+bbb", +"a+b", "b+c", "b-c")) |> elements2words() +} + +\dontrun{ +tibble::tibble(DomArch = c("aaa+bbb", "a+b", "b+c", "b-c")) |> + elements2words() |> + words2wc() +} + +\dontrun{ +filter_freq() +} +\dontrun{ +library(tidyverse) +tibble(DomArch = c("a+b", "a+b", "b+c", "a+b"), Lineage = c("l1", "l1", "l1", "l2")) |> + summarize_bylin(query = "all") +} + +\dontrun{ +summ.DA.byLin() +} +\dontrun{ +summ.DA() +} +\dontrun{ +summ.GC.byDALin +} +\dontrun{ +summ.GC.byLin() +} +\dontrun{ +summ.GC() +} +\dontrun{ +total_counts(pspa - gc_lin_counts, 0, "GC") +} +} +\author{ +Samuel Chen, Janani Ravi +} diff --git a/man/summarize_bylin.Rd b/man/summarize_bylin.Rd deleted file mode 100644 index 92b93652..00000000 --- a/man/summarize_bylin.Rd +++ /dev/null @@ -1,36 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/summarize.R -\name{summarize_bylin} -\alias{summarize_bylin} -\title{Summarize by Lineage} -\usage{ -summarize_bylin(prot = "prot", column = "DomArch", by = "Lineage", query) -} -\arguments{ -\item{prot}{A dataframe or tibble containing the data.} - -\item{column}{A string representing the column to be summarized -(e.g., \code{DomArch}). Default is "DomArch".} - -\item{by}{A string representing the grouping column (e.g., \code{Lineage}). -Default is "Lineage".} - -\item{query}{A string specifying the query pattern for filtering the target -column. Use "all" to skip filtering and include all rows.} -} -\value{ -A tibble summarizing the counts of occurrences of elements in -the \code{column}, grouped by the \code{by} column. The result includes the number -of occurrences (\code{count}) and is arranged in descending order of count. 
-} -\description{ -Summarize by Lineage -} -\examples{ -\dontrun{ -library(tidyverse) -tibble(DomArch = c("a+b", "a+b", "b+c", "a+b"), Lineage = c("l1", "l1", "l1", "l2")) |> - summarize_bylin(query = "all") -} - -} diff --git a/man/total_counts.Rd b/man/total_counts.Rd deleted file mode 100644 index 53d70096..00000000 --- a/man/total_counts.Rd +++ /dev/null @@ -1,58 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/summarize.R -\name{total_counts} -\alias{total_counts} -\title{Total Counts} -\usage{ -total_counts( - prot, - column = "DomArch", - lineage_col = "Lineage", - cutoff = 90, - RowsCutoff = FALSE, - digits = 2 -) -} -\arguments{ -\item{prot}{A data frame that must contain columns: -\itemize{\item Either 'GenContext' or 'DomArch.norep' \item count}} - -\item{column}{Character. The column to summarize, default is "DomArch".} - -\item{lineage_col}{Character. The name of the lineage column, default is -"Lineage".} - -\item{cutoff}{Numeric. Cutoff for total count. Counts below this cutoff value -will not be shown. Default is 0.} - -\item{RowsCutoff}{Logical. If TRUE, filters based on cumulative percentage -cutoff. Default is FALSE.} - -\item{digits}{Numeric. Number of decimal places for percentage columns. -Default is 2.} -} -\value{ -A data frame with the following columns: -\itemize{ -\item \code{{{ column }}}: Unique values from the specified column. -\item \code{totalcount}: The total count of occurrences for each unique value in -the specified column. -\item \code{IndividualCountPercent}: The percentage of each \code{totalcount} relative to -the overall count. -\item \code{CumulativePercent}: The cumulative percentage of total counts. -} -} -\description{ -Creates a data frame with a totalcount column - -This function is designed to sum the counts column by either Genomic Context or Domain Architecture and creates a totalcount column from those sums. -} -\note{ -Please refer to the source code if you have alternate file formats and/or -column names. -} -\examples{ -\dontrun{ -total_counts(pspa - gc_lin_counts, 0, "GC") -} -} diff --git a/man/words2wc.Rd b/man/words2wc.Rd deleted file mode 100644 index 69d006d5..00000000 --- a/man/words2wc.Rd +++ /dev/null @@ -1,32 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/summarize.R -\name{words2wc} -\alias{words2wc} -\title{Words 2 Word Counts} -\usage{ -words2wc(string) -} -\arguments{ -\item{string}{A character string containing the elements (words) to count. -This would typically be a space-delimited string representing domain -architectures or genomic contexts.} -} -\value{ -A tibble (tbl_df) with two columns: -\describe{ -\item{\code{words}}{A column containing the individual words -(domains or domain architectures).} -\item{\code{freq}}{A column containing the frequency counts for each word.} -} -} -\description{ -Get word counts (wc) \link{DOMAINS (DA) or DOMAIN ARCHITECTURES (GC)} -} -\examples{ -\dontrun{ -tibble::tibble(DomArch = c("aaa+bbb", "a+b", "b+c", "b-c")) |> - elements2words() |> - words2wc() -} - -} From 4aeaa113927b6f94b21c9f0dd0956bb7e48004a5 Mon Sep 17 00:00:00 2001 From: Seyi Kuforiji Date: Mon, 7 Oct 2024 22:50:16 +0100 Subject: [PATCH 10/41] Add error handling to multiple functions - Implement error handling for mapOption2Process, get_proc_medians, write_proc_medians_table, get_proc_weights, advanced_opts2est_walltime, assign_job_queue, and plot_estimated_walltimes . 
- Validate input arguments for each function to ensure they meet expected criteria. - Use tryCatch blocks to gracefully handle errors and warnings. - Provide informative error messages and detailed logging where appropriate. - Ensure functions fail gracefully and provide useful feedback. Also renamed the functions to the following; assign_job_queue -> assignJobQueue make_opts2procs -> mapOption2Process map_advanced_opts2procs -> mapAdvOption2Process get_proc_medians - calculateProcessRuntime write_proc_medians_table -> writeProcessRuntime2TSV write_proc_medians_yml -> writeProcessRuntime2YML get_proc_weights -> getProcessRuntimeWeights advanced_opts2est_walltime -> calculateEstimatedWallTimeFromOpts plot_estimated_walltimes -> plotEstimatedWallTimes --- NAMESPACE | 26 +- R/assign_job_queue.R | 484 ++++++++++++------ R/clean_clust_file.R | 4 +- R/combine_analysis.R | 4 +- R/combine_files.R | 10 +- R/create_lineage_lookup.R | 6 +- ...{assign_job_queue.Rd => assignJobQueue.Rd} | 13 +- ... calculateEstimatedWallTimeFromOptions.Rd} | 12 +- ..._medians.Rd => calculateProcessRuntime.Rd} | 10 +- ...lean_clust_file.Rd => cleanClusterFile.Rd} | 8 +- man/{combine_files.Rd => combineFiles.Rd} | 6 +- ...combine_full.Rd => combineFullAnalysis.Rd} | 6 +- man/{combine_ipr.Rd => combineIPR.Rd} | 6 +- ...neage_lookup.Rd => createLineageLookup.Rd} | 6 +- ...weights.Rd => getProcessRuntimeWeights.Rd} | 8 +- ..._opts2procs.Rd => mapAdvOption2Process.Rd} | 8 +- ...ake_opts2procs.Rd => mapOption2Process.Rd} | 8 +- ...walltimes.Rd => plotEstimatedWallTimes.Rd} | 11 +- ...ns_table.Rd => writeProcessRuntime2TSV.Rd} | 8 +- ...ans_yml.Rd => writeProcessRuntimeToYML.Rd} | 13 +- 20 files changed, 416 insertions(+), 241 deletions(-) rename man/{assign_job_queue.Rd => assignJobQueue.Rd} (64%) rename man/{advanced_opts2est_walltime.Rd => calculateEstimatedWallTimeFromOptions.Rd} (68%) rename man/{get_proc_medians.Rd => calculateProcessRuntime.Rd} (76%) rename man/{clean_clust_file.Rd => cleanClusterFile.Rd} (82%) rename man/{combine_files.Rd => combineFiles.Rd} (92%) rename man/{combine_full.Rd => combineFullAnalysis.Rd} (69%) rename man/{combine_ipr.Rd => combineIPR.Rd} (74%) rename man/{create_lineage_lookup.Rd => createLineageLookup.Rd} (91%) rename man/{get_proc_weights.Rd => getProcessRuntimeWeights.Rd} (73%) rename man/{map_advanced_opts2procs.Rd => mapAdvOption2Process.Rd} (76%) rename man/{make_opts2procs.Rd => mapOption2Process.Rd} (75%) rename man/{plot_estimated_walltimes.Rd => plotEstimatedWallTimes.Rd} (55%) rename man/{write_proc_medians_table.Rd => writeProcessRuntime2TSV.Rd} (77%) rename man/{write_proc_medians_yml.Rd => writeProcessRuntimeToYML.Rd} (61%) diff --git a/NAMESPACE b/NAMESPACE index 16cf0813..9c038631 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -12,26 +12,27 @@ export(add_leaves) export(add_lins) export(add_name) export(add_tax) -export(advanced_opts2est_walltime) export(alignFasta) export(assert_count_df) -export(assign_job_queue) +export(assignJobQueue) +export(calculateEstimatedWallTimeFromOptions) +export(calculateProcessRuntime) export(cleanup_GeneDesc) export(cleanup_clust) export(cleanup_domarch) export(cleanup_gencontext) export(cleanup_lineage) export(cleanup_species) -export(combine_files) -export(combine_full) -export(combine_ipr) +export(combineFiles) +export(combineFullAnalysis) +export(combineIPR) export(convert_aln2fa) export(convert_fa2tre) export(count_bycol) export(count_to_sunburst) export(count_to_treemap) +export(createLineageLookup) export(create_all_col_params) 
-export(create_lineage_lookup) export(create_one_col_params) export(domain_network) export(efetch_ipg) @@ -45,10 +46,9 @@ export(generate_all_aln2fa) export(generate_fa2tre) export(generate_msa) export(generate_trees) +export(getProcessRuntimeWeights) export(get_accnums_from_fasta_file) export(get_job_message) -export(get_proc_medians) -export(get_proc_weights) export(ipg2lin) export(ipr2viz) export(ipr2viz_web) @@ -58,12 +58,12 @@ export(lineage.domain_repeats.plot) export(lineage.neighbors.plot) export(lineage_sunburst) export(make_job_results_url) -export(make_opts2procs) +export(mapAdvOption2Process) +export(mapOption2Process) export(map_acc2name) -export(map_advanced_opts2procs) export(msa_pdf) export(pick_longer_duplicate) -export(plot_estimated_walltimes) +export(plotEstimatedWallTimes) export(prot2tax) export(prot2tax_old) export(remove_astrk) @@ -95,8 +95,8 @@ export(wordcloud2_element) export(wordcloud3) export(wordcloud_element) export(write.MsaAAMultipleAlignment) -export(write_proc_medians_table) -export(write_proc_medians_yml) +export(writeProcessRuntime2TSV) +export(writeProcessRuntimeToYML) importFrom(Biostrings,AAStringSet) importFrom(Biostrings,readAAStringSet) importFrom(Biostrings,toString) diff --git a/R/assign_job_queue.R b/R/assign_job_queue.R index bc5253d4..f1fcb6db 100644 --- a/R/assign_job_queue.R +++ b/R/assign_job_queue.R @@ -3,22 +3,32 @@ # pipeline. # to use this, construct paths like so: file.path(common_root, "path", "to", "file.R") # for example, the reference for this file would be: -# file.path(common_root, "molevol_scripts", "R", "assign_job_queue.R") +# file.path(common_root, "molevol_scripts", "R", "assignJobQueue.R") common_root <- Sys.getenv("COMMON_SRC_ROOT") #' Construct list where names (MolEvolvR advanced options) point to processes #' #' @return list where names (MolEvolvR advanced options) point to processes #' -#' example: list_opts2procs <- make_opts2procs +#' example: list_opts2procs <- mapOption2Process #' @export -make_opts2procs <- function() { +mapOption2Process <- function() { + tryCatch({ opts2processes <- list( - "homology_search" = c("dblast", "dblast_cleanup"), - "domain_architecture" = c("iprscan", "ipr2lineage", "ipr2da"), - "always" = c("blast_clust", "clust2table") # processes always present agnostic of advanced options + "homology_search" = c("dblast", "dblast_cleanup"), + "domain_architecture" = c("iprscan", "ipr2lineage", "ipr2da"), + # processes always present agnostic of advanced options + "always" = c("blast_clust", "clust2table") ) return(opts2processes) + }, error = function(e) { + message(paste("Encountered an error: ", e$message)) + }, warning = function(w) { + message(paste("Warning: ", w$message)) + }, finally = { + message("mapOption2Process function execution completed.") + }) + } #' Use MolEvolvR advanced options to get associated processes @@ -30,17 +40,29 @@ make_opts2procs <- function() { #' #' example: #' advanced_opts <- c("homology_search", "domain_architecture") -#' procs <- map_advanced_opts2procs(advanced_opts) +#' procs <- mapAdvOption2Process(advanced_opts) #' @export -map_advanced_opts2procs <- function(advanced_opts) { +mapAdvOption2Process <- function(advanced_opts) { + if (!is.character(advanced_opts)) { + stop("Argument must be a character vector!") + } + tryCatch({ # append 'always' to add procs that always run advanced_opts <- c(advanced_opts, "always") - opts2proc <- make_opts2procs() + opts2proc <- mapOption2Process() # setup index for opts2proc based on advanced options idx <- 
which(names(opts2proc) %in% advanced_opts) # extract processes that will run procs <- opts2proc[idx] |> unlist() return(procs) + }, error = function(e) { + message(paste("Encountered an error: ", e$message)) + }, warning = function(w) { + message(paste("Warning: ", w$message)) + }, finally = { + message("mapOption2Process function execution completed.") + }) + } #' Scrape MolEvolvR logs and calculate median processes @@ -58,47 +80,68 @@ map_advanced_opts2procs <- function(advanced_opts) { #' #' 1) #' dir_job_results <- "/data/scratch/janani/molevolvr_out" -#' list_proc_medians <- get_proc_medians(dir_job_results) +#' list_proc_medians <- calculateProcessRuntime(dir_job_results) #' #' 2) from outside container environment #' common_root <- "/data/molevolvr_transfer/molevolvr_dev" #' dir_job_results <- "/data/molevolvr_transfer/molevolvr_dev/job_results" -#' list_proc_medians <- get_proc_medians(dir_job_results) +#' list_proc_medians <- calculateProcessRuntime(dir_job_results) #' @export -get_proc_medians <- function(dir_job_results) { +calculateProcessRuntime <- function(dir_job_results) { + tryCatch({ + # Check if dir_job_results is a character string + if (!is.character(dir_job_results) || length(dir_job_results) != 1) { + stop("Input 'dir_job_results' must be a single character string.") + } + + # Check if dir_job_results exists + if (!dir.exists(dir_job_results)) { + stop(paste("The directory", dir_job_results, "does not exist.")) + } + source(file.path(common_root, "molevol_scripts", "R", "metrics.R")) # aggregate logs from - path_log_data <- file.path(common_root, "molevol_scripts", "log_data", "prod_logs.rda") + path_log_data <- file.path(common_root, + "molevol_scripts", "log_data", "prod_logs.rda") # ensure the folder exists to the location if (!dir.exists(path_log_data)) { - dir.create(dirname(path_log_data), recursive = TRUE, showWarnings = FALSE) + dir.create(dirname(path_log_data), + recursive = TRUE, showWarnings = FALSE) } # attempt to load pre-generated logdata if (!file.exists(path_log_data)) { - logs <- aggregate_logs(dir_job_results, latest_date = Sys.Date() - 60) - save(logs, file = path_log_data) + logs <- aggregate_logs(dir_job_results, latest_date = Sys.Date() - 60) + save(logs, file = path_log_data) } else { - load(path_log_data) # loads the logs object + load(path_log_data) # loads the logs object } df_log <- logs$df_log procs <- c( - "dblast", "dblast_cleanup", "iprscan", - "ipr2lineage", "ipr2da", "blast_clust", - "clust2table" + "dblast", "dblast_cleanup", "iprscan", + "ipr2lineage", "ipr2da", "blast_clust", + "clust2table" ) list_proc_medians <- df_log |> - dplyr::select(dplyr::all_of(procs)) |> - dplyr::summarise( - dplyr::across( - dplyr::everything(), - \(x) median(x, na.rm = TRUE) - ) - ) |> - as.list() + dplyr::select(dplyr::all_of(procs)) |> + dplyr::summarise( + dplyr::across( + dplyr::everything(), + \(x) median(x, na.rm = TRUE) + ) + ) |> + as.list() return(list_proc_medians) + }, error = function(e) { + message(paste("Encountered an error: ", e$message)) + }, warning = function(w) { + message(paste("Warning: ", w$message)) + }, finally = { + message("calculateProcessRuntime function execution completed.") + }) + } #' Write a table of 2 columns: 1) process and 2) median seconds @@ -113,51 +156,99 @@ get_proc_medians <- function(dir_job_results) { #' #' @return [tbl_df] 2 columns: 1) process and 2) median seconds #' -#' example: write_proc_medians_table( +#' example: writeProcessRuntime2TSV( #' "/data/scratch/janani/molevolvr_out/", #' 
"/data/scratch/janani/molevolvr_out/log_tbl.tsv" #' ) #' @export -write_proc_medians_table <- function(dir_job_results, filepath) { - df_proc_medians <- get_proc_medians(dir_job_results) |> - tibble::as_tibble() |> - tidyr::pivot_longer( - dplyr::everything(), - names_to = "process", - values_to = "median_seconds" - ) |> - dplyr::arrange(dplyr::desc(median_seconds)) +writeProcessRuntime2TSV <- function(dir_job_results, filepath) { + tryCatch({ + # Error handling for input arguments + if (!is.character(dir_job_results) || length(dir_job_results) != 1) { + stop("Input 'dir_job_results' must be a single character string.") + } + + if (!dir.exists(dir_job_results)) { + stop(paste("The directory", dir_job_results, "does not exist.")) + } + + if (!is.character(filepath) || length(filepath) != 1) { + stop("Input 'filepath' must be a single character string.") + } + df_proc_medians <- calculateProcessRuntime(dir_job_results) |> + tibble::as_tibble() |> + tidyr::pivot_longer( + dplyr::everything(), + names_to = "process", + values_to = "median_seconds" + ) |> + dplyr::arrange(dplyr::desc(median_seconds)) + + # Write the resulting tibble to a TSV file readr::write_tsv(df_proc_medians, file = filepath) return(df_proc_medians) + }, error = function(e) { + message(paste("Encountered an error: ", e$message)) + }, warning = function(w) { + message(paste("Warning: ", w$message)) + }, finally = { + message("writeProcessRuntime2TSV function execution completed.") + }) + } #' Compute median process runtimes, then write a YAML list of the processes and #' their median runtimes in seconds to the path specified by 'filepath'. #' #' The default value of filepath is the value of the env var -#' MOLEVOLVR_PROC_WEIGHTS, which get_proc_weights() also uses as its default +#' MOLEVOLVR_PROC_WEIGHTS, which writeProcessRuntimeToYML() also uses as its default #' read location. 
#' #' @param dir_job_results [chr] path to MolEvolvR job_results directory -#' @param filepath [chr] path to save YAML file; if NULL, uses ./molevol_scripts/log_data/job_proc_weights.yml +#' @param filepath [chr] path to save YAML file; if NULL, +#' uses ./molevol_scripts/log_data/job_proc_weights.yml #' #' @importFrom yaml write_yaml #' #' @examples #' \dontrun{ -#' write_proc_medians_yml( +#' writeProcessRuntimeToYML( #' "/data/scratch/janani/molevolvr_out/", #' "/data/scratch/janani/molevolvr_out/log_tbl.yml" #' ) #' } #' @export -write_proc_medians_yml <- function(dir_job_results, filepath = NULL) { +writeProcessRuntimeToYML <- function(dir_job_results, filepath = NULL) { + tryCatch({ + # Error handling for dir_job_results arguments + if (!is.character(dir_job_results) || length(dir_job_results) != 1) { + stop("Input 'dir_job_results' must be a single character string.") + } + + if (!dir.exists(dir_job_results)) { + stop(paste("The directory", dir_job_results, "does not exist.")) + } if (is.null(filepath)) { - filepath <- file.path(common_root, "molevol_scripts", "log_data", "job_proc_weights.yml") + filepath <- file.path(common_root, + "molevol_scripts", + "log_data", + "job_proc_weights.yml") + } + if (!is.character(filepath) || length(filepath) != 1) { + stop("Input 'filepath' must be a single character string.") } - medians <- get_proc_medians(dir_job_results) + medians <- calculateProcessRuntime(dir_job_results) yaml::write_yaml(medians, filepath) + }, error = function(e) { + message(paste("Encountered an error: "), e$message) + }, warning = function(w) { + message(paste("Warning: "), w$message) + }, finally = { + message("write_proc_medians_table function execution completed.") + } + ) + } #' Quickly get the runtime weights for MolEvolvR backend processes @@ -170,50 +261,52 @@ write_proc_medians_yml <- function(dir_job_results, filepath = NULL) { #' #' @return [list] names: processes; values: median runtime (seconds) #' -#' example: get_proc_weights() +#' example: writeProcessRuntimeToYML() #' @export -get_proc_weights <- function(medians_yml_path = NULL) { - if (is.null(medians_yml_path)) { - medians_yml_path <- file.path(common_root, "molevol_scripts", "log_data", "job_proc_weights.yml") +getProcessRuntimeWeights <- function(medians_yml_path = NULL) { + if (is.null(medians_yml_path)) { + medians_yml_path <- file.path(common_root, + "molevol_scripts", + "log_data", + "job_proc_weights.yml") + } + + proc_weights <- tryCatch({ + # attempt to read the weights from the YAML file produced by + # writeProcessRuntimeToYML() + if (stringr::str_trim(medians_yml_path) == "") { + stop( + stringr::str_glue("medians_yml_path is empty + ({medians_yml_path}), returning default weights") + ) } - proc_weights <- tryCatch( - { - # attempt to read the weights from the YAML file produced by - # write_proc_medians_yml() - if (stringr::str_trim(medians_yml_path) == "") { - stop( - stringr::str_glue("medians_yml_path is empty ({medians_yml_path}), returning default weights") - ) - } - - proc_weights <- yaml::read_yaml(medians_yml_path) - }, - # to avoid fatal errors in reading the proc weights yaml, - # some median process runtimes have been hardcoded based on - # the result of get_proc_medians() from Jan 2024 - error = function(cond) { - proc_weights <- list( - "dblast" = 2810, - "iprscan" = 1016, - "dblast_cleanup" = 79, - "ipr2lineage" = 18, - "ipr2da" = 12, - "blast_clust" = 2, - "clust2table" = 2 - ) - proc_weights - } + proc_weights <- yaml::read_yaml(medians_yml_path) + }, + # to avoid fatal 
errors in reading the proc weights yaml, + # some median process runtimes have been hardcoded based on + # the result of calculateProcessRuntime() from Jan 2024 + error = function(cond) { + proc_weights <- list( + "dblast" = 2810, + "iprscan" = 1016, + "dblast_cleanup" = 79, + "ipr2lineage" = 18, + "ipr2da" = 12, + "blast_clust" = 2, + "clust2table" = 2 ) + proc_weights + }) - return(proc_weights) + return(proc_weights) } #' Given MolEvolvR advanced options and number of inputs, #' calculate the total estimated walltime for the job #' #' @param advanced_opts character vector of MolEvolvR advanced options -#' (see make_opts2procs for the options) +#' (see mapOption2Process for the options) #' @param n_inputs total number of input proteins #' #' @importFrom dplyr if_else @@ -221,68 +314,129 @@ get_proc_weights <- function(medians_yml_path = NULL) { #' #' @return total estimated number of seconds a job will process (walltime) #' -#' example: advanced_opts2est_walltime(c("homology_search", "domain_architecture"), n_inputs = 3, n_hits = 50L) +#' example: calculateEstimatedWallTimeFromOptions(c("homology_search", +#' "domain_architecture"), +#' n_inputs = 3, n_hits = 50L) #' @export -advanced_opts2est_walltime <- function(advanced_opts, n_inputs = 1L, n_hits = NULL, verbose = FALSE) { +calculateEstimatedWallTimeFromOptions <- function(advanced_opts, + n_inputs = 1L, + n_hits = NULL, + verbose = FALSE) { + + tryCatch({ # to calculate est walltime for a homology search job, the number of hits # must be provided validation_fail <- is.null(n_hits) && "homology_search" %in% advanced_opts stopifnot(!validation_fail) - proc_weights <- get_proc_weights() + # Validate advanced_opts + if (!is.character(advanced_opts)) { + stop("Argument 'advanced_opts' must be a character vector.") + } + + # Validate n_inputs + if (!is.numeric(n_inputs) || length(n_inputs) != 1 || n_inputs <= 0) { + stop("Argument 'n_inputs' must be a single positive numeric value.") + } + + # Validate n_hits if homology_search is in advanced_opts + if ("homology_search" %in% advanced_opts && + (is.null(n_hits)|| !is.numeric(n_hits) + || length(n_hits) != 1 || n_hits < 0)) { + stop("Argument 'n_hits' must be a single non-negative numeric value when + 'homology_search' is in 'advanced_opts'.") + } + + # Get process weights + proc_weights <- writeProcessRuntimeToYML() + if (!is.list(proc_weights)) { + stop("Process weights could not be retrieved correctly.") + } + # sort process weights by names and convert to vec proc_weights <- proc_weights[order(names(proc_weights))] |> unlist() all_procs <- names(proc_weights) |> sort() # get processes from advanced options and sort by names - procs_from_opts <- map_advanced_opts2procs(advanced_opts) + procs_from_opts <- mapAdvOption2Process(advanced_opts) procs_from_opts <- sort(procs_from_opts) # binary encode: yes proc will run (1); else 0 binary_proc_vec <- dplyr::if_else(all_procs %in% procs_from_opts, 1L, 0L) # dot product of weights and procs to run; scaled by the number of inputs est_walltime <- (n_inputs * (binary_proc_vec %*% proc_weights)) |> - as.numeric() + as.numeric() # calculate the additional processes to run for the homologous hits if ("homology_search" %in% advanced_opts) { - opts2procs <- make_opts2procs() - # exclude the homology search processes for the homologous hits - procs2exclude_for_homologs <- opts2procs[["homology_search"]] - procs_homologs <- procs_from_opts[!(procs_from_opts %in% procs2exclude_for_homologs)] - binary_proc_vec_homolog <- dplyr::if_else(all_procs %in% 
procs_homologs, 1L, 0L) - # add the estimated walltime for processes run on the homologous hits - est_walltime <- est_walltime + - (n_hits * (binary_proc_vec_homolog %*% proc_weights) |> as.numeric()) + opts2procs <- mapOption2Process() + # exclude the homology search processes for the homologous hits + procs2exclude_for_homologs <- opts2procs[["homology_search"]] + procs_homologs <- procs_from_opts[!(procs_from_opts + %in% procs2exclude_for_homologs)] + binary_proc_vec_homolog <- dplyr::if_else(all_procs + %in% procs_homologs, 1L, 0L) + # add the estimated walltime for processes run on the homologous hits + est_walltime <- est_walltime + + (n_hits * (binary_proc_vec_homolog + %*% proc_weights) |> as.numeric()) } if (verbose) { - msg <- stringr::str_glue( - "warnings from advanced_opts2est_walltime():\n", - "\tn_inputs={n_inputs}\n", - "\tn_hits={ifelse(is.null(n_hits), 'null', n_hits)}\n", - "\test_walltime={est_walltime}\n\n" - ) - cat(file = stderr(), msg) + msg <- stringr::str_glue( + "warnings from calculateEstimatedWallTimeFromOptions():\n", + "\tn_inputs={n_inputs}\n", + "\tn_hits={ifelse(is.null(n_hits), 'null', n_hits)}\n", + "\test_walltime={est_walltime}\n\n" + ) + cat(file = stderr(), msg) } return(est_walltime) + }, error = function(e) { + message(paste("Encountered an error: ", e$message)) + }, warning = function(w) { + message(paste("Warning: ", w$message)) + }, finally = { + message("calculateEstimatedWallTimeFromOptions + function execution completed.") + }) + } + #' Decision function to assign job queue #' #' @param t_sec_estimate estimated number of seconds a job will process -#' (from advanced_opts2est_walltime()) +#' (from calculateEstimatedWallTimeFromOptions()) #' @param t_long threshold value that defines the lower bound for assigning a #' job to the "long queue" #' #' @return a string of "short" or "long" #' #' example: -#' advanced_opts2est_walltime(c("homology_search", "domain_architecture"), 3) |> -#' assign_job_queue() +#' calculateEstimatedWallTimeFromOptions(c("homology_search", +#' "domain_architecture"), 3) |> +#' assignJobQueue() #' @export -assign_job_queue <- function( - t_sec_estimate, - t_cutoff = 21600 # 6 hours - ) { +assignJobQueue <- function( + t_sec_estimate, + t_cutoff = 21600 # 6 hours +) { + tryCatch({ + if (!is.numeric(t_sec_estimate) || length(t_sec_estimate) != 1) { + stop("Argument 't_sec_estimate' must be a single numeric value.") + } + + if (!is.numeric(t_cutoff) || length(t_cutoff) != 1 || t_cutoff < 0) { + stop("Argument 't_cutoff' must be a single non-negative numeric value.") + } + queue <- ifelse(t_sec_estimate > t_cutoff, "long", "short") return(queue) + }, error = function(e) { + message(paste("Encountered an error: ", e$message)) + }, warning = function(w) { + message(paste("Warning: ", w$message)) + }, finally = { + message("assignJobQueue function execution completed.") + }) + } #' Plot the estimated runtimes for different advanced options and number @@ -297,81 +451,97 @@ assign_job_queue <- function( #' @return line plot object #' #' example: -#' p <- plot_estimated_walltimes() -#' ggplot2::ggsave(filename = "/data/molevolvr_transfer/molevolvr_dev/molevol_scripts/docs/estimate_walltimes.png", plot = p) +#' p <- plotEstimatedWallTimes() +#' ggplot2::ggsave(filename = "/data/molevolvr_transfer/molevolvr_ +#' dev/molevol_scripts/docs/estimate_walltimes.png", plot = p) #' @export -plot_estimated_walltimes <- function() { - opts <- make_opts2procs() |> names() +plotEstimatedWallTimes <- function() { + tryCatch({ + opts <- 
mapOption2Process() |> names() # get all possible submission permutations (powerset) get_powerset <- function(vec) { - # generate powerset (do not include empty set) - n <- length(vec) - indices <- 1:n - powerset <- lapply(1:n, function(x) combn(indices, x, simplify = FALSE)) - powerset <- unlist(powerset, recursive = FALSE) - powerset <- lapply(powerset, function(index) vec[index]) - powerset + # generate powerset (do not include empty set) + n <- length(vec) + indices <- 1:n + powerset <- lapply(1:n, function(x) combn(indices, x, simplify = FALSE)) + powerset <- unlist(powerset, recursive = FALSE) + powerset <- lapply(powerset, function(index) vec[index]) + powerset } opts_power_set <- get_powerset(opts) est_walltimes <- list() for (i in 1:20) { - est_walltimes <- append( - x = est_walltimes, - values = sapply( - opts_power_set, - FUN = function(advanced_opts) { - # for simplicity, assume the default number of homologus hits (100) - n_hits <- if ("homology_search" %in% advanced_opts) { - 100 - } else { - NULL - } - est_walltime <- advanced_opts2est_walltime( - advanced_opts, - n_inputs = i, - n_hits = n_hits, - verbose = TRUE - ) - names(est_walltime) <- paste0(advanced_opts, collapse = "_") - est_walltime - } + est_walltimes <- append( + x = est_walltimes, + values = sapply( + opts_power_set, + FUN = function(advanced_opts) { + # for simplicity, assume the default number of homologus hits (100) + n_hits <- if ("homology_search" %in% advanced_opts) { + 100 + } else { + NULL + } + est_walltime <- calculateEstimatedWallTimeFromOptions( + advanced_opts, + n_inputs = i, + n_hits = n_hits, + verbose = TRUE ) + names(est_walltime) <- paste0(advanced_opts, collapse = "_") + est_walltime + } ) + ) } # concat all results to their unique names est_walltimes <- tapply( - unlist( - est_walltimes, - use.names = FALSE - ), - rep( - names(est_walltimes), - lengths(est_walltimes) - ), - FUN = c + unlist( + est_walltimes, + use.names = FALSE + ), + rep( + names(est_walltimes), + lengths(est_walltimes) + ), + FUN = c ) df_walltimes <- est_walltimes |> - unlist() |> - matrix(nrow = length(est_walltimes[[1]]), ncol = length(names(est_walltimes))) + unlist() |> + matrix(nrow = length(est_walltimes[[1]]), + ncol = length(names(est_walltimes))) colnames(df_walltimes) <- names(est_walltimes) df_walltimes <- df_walltimes |> tibble::as_tibble() # rm always col or powerset outcome without the "always" processes col_idx_keep <- grep(pattern = "always$", x = names(df_walltimes)) df_walltimes <- df_walltimes |> - dplyr::select(col_idx_keep) + dplyr::select(col_idx_keep) # bind n_inputs df_walltimes <- df_walltimes |> - dplyr::mutate(n_inputs = 1:20) - df_walltimes <- tidyr::gather(df_walltimes, key = "advanced_opts", value = "est_walltime", -n_inputs) + dplyr::mutate(n_inputs = 1:20) + df_walltimes <- tidyr::gather(df_walltimes, + key = "advanced_opts", + value = "est_walltime", + n_inputs) # sec to hrs df_walltimes <- df_walltimes |> - dplyr::mutate(est_walltime = est_walltime / 3600) - p <- ggplot2::ggplot(df_walltimes, ggplot2::aes(x = n_inputs, y = est_walltime, color = advanced_opts)) + - ggplot2::geom_line() + - ggplot2::labs( - title = "MolEvolvR estimated runtimes", - x = "Number of inputs", - y = "Estimated walltime (hours)" - ) + dplyr::mutate(est_walltime = est_walltime / 3600) + p <- ggplot2::ggplot(df_walltimes, ggplot2::aes(x = n_inputs, + y = est_walltime, + color = advanced_opts)) + + ggplot2::geom_line() + + ggplot2::labs( + title = "MolEvolvR estimated runtimes", + x = "Number of inputs", + y = 
"Estimated walltime (hours)" + ) return(p) + }, error = function(e) { + message(paste("Encountered an error: ", e$message)) + }, warning = function(w) { + message(paste("Warning: ", w$message)) + }, finally = { + message("plotEstimatedWallTimes function execution completed.") + }) + } diff --git a/R/clean_clust_file.R b/R/clean_clust_file.R index d3f813e5..87dcde70 100755 --- a/R/clean_clust_file.R +++ b/R/clean_clust_file.R @@ -55,9 +55,9 @@ #' #' @examples #' \dontrun{ -#' clean_clust_file("data/pspa.op_ins_cls", writepath = NULL, query = "pspa") +#' cleanClusterFile("data/pspa.op_ins_cls", writepath = NULL, query = "pspa") #' } -clean_clust_file <- function(path, writepath = NULL, query) { +cleanClusterFile <- function(path, writepath = NULL, query) { # ?? does the following line need to be changed to read_lines()? prot <- read_tsv(path, col_names = F) diff --git a/R/combine_analysis.R b/R/combine_analysis.R index bb3b3ce2..58ce1f14 100755 --- a/R/combine_analysis.R +++ b/R/combine_analysis.R @@ -17,7 +17,7 @@ #' @export #' #' @examples -combine_full <- function(inpath, ret = FALSE) { +combineFullAnalysis <- function(inpath, ret = FALSE) { ## Combining full_analysis files full_combnd <- combine_files(inpath, pattern = "*.full_analysis.tsv", skip = 0, @@ -44,7 +44,7 @@ combine_full <- function(inpath, ret = FALSE) { #' @export #' #' @examples -combine_ipr <- function(inpath, ret = FALSE) { +combineIPR <- function(inpath, ret = FALSE) { ## Combining clean ipr files ipr_combnd <- combine_files(inpath, pattern = "*.iprscan_cln.tsv", skip = 0, diff --git a/R/combine_files.R b/R/combine_files.R index 76c5fa09..455ddd53 100755 --- a/R/combine_files.R +++ b/R/combine_files.R @@ -38,7 +38,7 @@ #' @export #' #' @examples -combine_files <- function(inpath = c("../molevol_data/project_data/phage_defense/"), +combineFiles <- function(inpath = c("../molevol_data/project_data/phage_defense/"), pattern = "*full_analysis.tsv", delim = "\t", skip = 0, col_names = T) { @@ -67,7 +67,7 @@ combine_files <- function(inpath = c("../molevol_data/project_data/phage_defense ## Sample Runs ## ################# # ## Combining full_analysis files -# full_combnd <- combine_files(inpath, +# full_combnd <- combineFiles(inpath, # pattern="*full_analysis.txt", skip=0, # col_names=T) # @@ -75,7 +75,7 @@ combine_files <- function(inpath = c("../molevol_data/project_data/phage_defense # path="../molevol_data/project_data/slps/full_combined.tsv") # # ## Combining clean files -# cln_combnd <- combine_files(inpath, +# cln_combnd <- combineFiles(inpath, # pattern="^.*cln.txt", skip=0, # col_names=T) # @@ -86,14 +86,14 @@ combine_files <- function(inpath = c("../molevol_data/project_data/phage_defense # ## Less helpful examples! # ## Combining BLAST files # ## Likely makes no sense since clustering is done per query -# cl_blast_combnd <- combine_files(inpath, +# cl_blast_combnd <- combineFiles(inpath, # pattern="^.*refseq.1e-5.txt", skip=0, # col_names=cl_blast_colnames) %>% # select(-PcPositive, -ClusterID) # # ## Combining IPR files # ## Likely makes no sense since there may be repeated AccNum from indiv. files! 
-# ipr_combnd <- combine_files(inpath, +# ipr_combnd <- combineFiles(inpath, # pattern="*iprscan.lins*", skip=0, # col_names=ipr_colnames) # diff --git a/R/create_lineage_lookup.R b/R/create_lineage_lookup.R index e7374df3..d911934a 100644 --- a/R/create_lineage_lookup.R +++ b/R/create_lineage_lookup.R @@ -26,9 +26,9 @@ #' @export #' #' @examples -create_lineage_lookup <- function(lineage_file = here("data/rankedlineage.dmp"), +createLineageLookup <- function(lineage_file = here("data/rankedlineage.dmp"), outfile, taxonomic_rank = "phylum") { - shorten_NA <- function(Lineage) { + .shortenNA <- function(Lineage) { first_NA <- str_locate(Lineage, "NA")[1] if (is.na(first_NA)) { # No NAs @@ -92,7 +92,7 @@ create_lineage_lookup <- function(lineage_file = here("data/rankedlineage.dmp"), # Takes a while (2million rows after all) rankedLinsCombined <- rankedLins %>% unite(col = "Lineage", all_of(combined_taxonomy), sep = ">") %>% - mutate(Lineage = unlist(map(Lineage, shorten_NA))) + mutate(Lineage = unlist(map(Lineage, .shortenNA))) diff --git a/man/assign_job_queue.Rd b/man/assignJobQueue.Rd similarity index 64% rename from man/assign_job_queue.Rd rename to man/assignJobQueue.Rd index ceb6fa77..27511b6a 100644 --- a/man/assign_job_queue.Rd +++ b/man/assignJobQueue.Rd @@ -1,14 +1,14 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/assign_job_queue.R -\name{assign_job_queue} -\alias{assign_job_queue} +\name{assignJobQueue} +\alias{assignJobQueue} \title{Decision function to assign job queue} \usage{ -assign_job_queue(t_sec_estimate, t_cutoff = 21600) +assignJobQueue(t_sec_estimate, t_cutoff = 21600) } \arguments{ \item{t_sec_estimate}{estimated number of seconds a job will process -(from advanced_opts2est_walltime())} +(from calculateEstimatedWallTimeFromOptions())} \item{t_long}{threshold value that defines the lower bound for assigning a job to the "long queue"} @@ -17,8 +17,9 @@ job to the "long queue"} a string of "short" or "long" example: -advanced_opts2est_walltime(c("homology_search", "domain_architecture"), 3) |> -assign_job_queue() +calculateEstimatedWallTimeFromOptions(c("homology_search", +"domain_architecture"), 3) |> +assignJobQueue() } \description{ Decision function to assign job queue diff --git a/man/advanced_opts2est_walltime.Rd b/man/calculateEstimatedWallTimeFromOptions.Rd similarity index 68% rename from man/advanced_opts2est_walltime.Rd rename to man/calculateEstimatedWallTimeFromOptions.Rd index ea4b29e6..e4eec3fd 100644 --- a/man/advanced_opts2est_walltime.Rd +++ b/man/calculateEstimatedWallTimeFromOptions.Rd @@ -1,11 +1,11 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/assign_job_queue.R -\name{advanced_opts2est_walltime} -\alias{advanced_opts2est_walltime} +\name{calculateEstimatedWallTimeFromOptions} +\alias{calculateEstimatedWallTimeFromOptions} \title{Given MolEvolvR advanced options and number of inputs, calculate the total estimated walltime for the job} \usage{ -advanced_opts2est_walltime( +calculateEstimatedWallTimeFromOptions( advanced_opts, n_inputs = 1L, n_hits = NULL, @@ -14,14 +14,16 @@ advanced_opts2est_walltime( } \arguments{ \item{advanced_opts}{character vector of MolEvolvR advanced options -(see make_opts2procs for the options)} +(see mapOption2Process for the options)} \item{n_inputs}{total number of input proteins} } \value{ total estimated number of seconds a job will process (walltime) -example: advanced_opts2est_walltime(c("homology_search", "domain_architecture"), n_inputs = 3, 
n_hits = 50L) +example: calculateEstimatedWallTimeFromOptions(c("homology_search", +"domain_architecture"), +n_inputs = 3, n_hits = 50L) } \description{ Given MolEvolvR advanced options and number of inputs, diff --git a/man/get_proc_medians.Rd b/man/calculateProcessRuntime.Rd similarity index 76% rename from man/get_proc_medians.Rd rename to man/calculateProcessRuntime.Rd index b6db0b56..bb6dd1ed 100644 --- a/man/get_proc_medians.Rd +++ b/man/calculateProcessRuntime.Rd @@ -1,10 +1,10 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/assign_job_queue.R -\name{get_proc_medians} -\alias{get_proc_medians} +\name{calculateProcessRuntime} +\alias{calculateProcessRuntime} \title{Scrape MolEvolvR logs and calculate median processes} \usage{ -get_proc_medians(dir_job_results) +calculateProcessRuntime(dir_job_results) } \arguments{ \item{dir_job_results}{\link{chr} path to MolEvolvR job_results @@ -21,12 +21,12 @@ examples: } dir_job_results <- "/data/scratch/janani/molevolvr_out" -list_proc_medians <- get_proc_medians(dir_job_results) +list_proc_medians <- calculateProcessRuntime(dir_job_results) \enumerate{ \item from outside container environment common_root <- "/data/molevolvr_transfer/molevolvr_dev" dir_job_results <- "/data/molevolvr_transfer/molevolvr_dev/job_results" -list_proc_medians <- get_proc_medians(dir_job_results) +list_proc_medians <- calculateProcessRuntime(dir_job_results) } } \description{ diff --git a/man/clean_clust_file.Rd b/man/cleanClusterFile.Rd similarity index 82% rename from man/clean_clust_file.Rd rename to man/cleanClusterFile.Rd index bba3072e..d2818662 100644 --- a/man/clean_clust_file.Rd +++ b/man/cleanClusterFile.Rd @@ -1,10 +1,10 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/clean_clust_file.R -\name{clean_clust_file} -\alias{clean_clust_file} +\name{cleanClusterFile} +\alias{cleanClusterFile} \title{Clean Cluster File} \usage{ -clean_clust_file(path, writepath = NULL, query) +cleanClusterFile(path, writepath = NULL, query) } \arguments{ \item{path}{A character to the path of the cluster file to be cleaned} @@ -24,6 +24,6 @@ This function reads a space-separated cluster file and converts it to a cleaned } \examples{ \dontrun{ -clean_clust_file("data/pspa.op_ins_cls", writepath = NULL, query = "pspa") +cleanClusterFile("data/pspa.op_ins_cls", writepath = NULL, query = "pspa") } } diff --git a/man/combine_files.Rd b/man/combineFiles.Rd similarity index 92% rename from man/combine_files.Rd rename to man/combineFiles.Rd index 4126eb9e..3b56b923 100644 --- a/man/combine_files.Rd +++ b/man/combineFiles.Rd @@ -1,10 +1,10 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/combine_files.R -\name{combine_files} -\alias{combine_files} +\name{combineFiles} +\alias{combineFiles} \title{Download the combined assembly summaries of genbank and refseq} \usage{ -combine_files( +combineFiles( inpath = c("../molevol_data/project_data/phage_defense/"), pattern = "*full_analysis.tsv", delim = "\\t", diff --git a/man/combine_full.Rd b/man/combineFullAnalysis.Rd similarity index 69% rename from man/combine_full.Rd rename to man/combineFullAnalysis.Rd index f4e6597b..35925e86 100644 --- a/man/combine_full.Rd +++ b/man/combineFullAnalysis.Rd @@ -1,10 +1,10 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/combine_analysis.R -\name{combine_full} -\alias{combine_full} +\name{combineFullAnalysis} +\alias{combineFullAnalysis} \title{Combining full_analysis files} 
\usage{ -combine_full(inpath, ret = FALSE) +combineFullAnalysis(inpath, ret = FALSE) } \arguments{ \item{ret}{} diff --git a/man/combine_ipr.Rd b/man/combineIPR.Rd similarity index 74% rename from man/combine_ipr.Rd rename to man/combineIPR.Rd index 52aa3057..035c4274 100644 --- a/man/combine_ipr.Rd +++ b/man/combineIPR.Rd @@ -1,10 +1,10 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/combine_analysis.R -\name{combine_ipr} -\alias{combine_ipr} +\name{combineIPR} +\alias{combineIPR} \title{Combining clean ipr files} \usage{ -combine_ipr(inpath, ret = FALSE) +combineIPR(inpath, ret = FALSE) } \arguments{ \item{ret}{} diff --git a/man/create_lineage_lookup.Rd b/man/createLineageLookup.Rd similarity index 91% rename from man/create_lineage_lookup.Rd rename to man/createLineageLookup.Rd index 51670f35..5dbab978 100644 --- a/man/create_lineage_lookup.Rd +++ b/man/createLineageLookup.Rd @@ -1,10 +1,10 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/create_lineage_lookup.R -\name{create_lineage_lookup} -\alias{create_lineage_lookup} +\name{createLineageLookup} +\alias{createLineageLookup} \title{Create a look up table that goes from TaxID, to Lineage} \usage{ -create_lineage_lookup( +createLineageLookup( lineage_file = here("data/rankedlineage.dmp"), outfile, taxonomic_rank = "phylum" diff --git a/man/get_proc_weights.Rd b/man/getProcessRuntimeWeights.Rd similarity index 73% rename from man/get_proc_weights.Rd rename to man/getProcessRuntimeWeights.Rd index 0f4beb57..8eff0347 100644 --- a/man/get_proc_weights.Rd +++ b/man/getProcessRuntimeWeights.Rd @@ -1,10 +1,10 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/assign_job_queue.R -\name{get_proc_weights} -\alias{get_proc_weights} +\name{getProcessRuntimeWeights} +\alias{getProcessRuntimeWeights} \title{Quickly get the runtime weights for MolEvolvR backend processes} \usage{ -get_proc_weights(medians_yml_path = NULL) +getProcessRuntimeWeights(medians_yml_path = NULL) } \arguments{ \item{dir_job_results}{\link{chr} path to MolEvolvR job_results @@ -13,7 +13,7 @@ directory} \value{ \link{list} names: processes; values: median runtime (seconds) -example: get_proc_weights() +example: writeProcessRuntimeToYML() } \description{ Quickly get the runtime weights for MolEvolvR backend processes diff --git a/man/map_advanced_opts2procs.Rd b/man/mapAdvOption2Process.Rd similarity index 76% rename from man/map_advanced_opts2procs.Rd rename to man/mapAdvOption2Process.Rd index 631708b4..5bd9ee65 100644 --- a/man/map_advanced_opts2procs.Rd +++ b/man/mapAdvOption2Process.Rd @@ -1,10 +1,10 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/assign_job_queue.R -\name{map_advanced_opts2procs} -\alias{map_advanced_opts2procs} +\name{mapAdvOption2Process} +\alias{mapAdvOption2Process} \title{Use MolEvolvR advanced options to get associated processes} \usage{ -map_advanced_opts2procs(advanced_opts) +mapAdvOption2Process(advanced_opts) } \arguments{ \item{advanced_opts}{character vector of MolEvolvR advanced options} @@ -15,7 +15,7 @@ the advanced options example: advanced_opts <- c("homology_search", "domain_architecture") -procs <- map_advanced_opts2procs(advanced_opts) +procs <- mapAdvOption2Process(advanced_opts) } \description{ Use MolEvolvR advanced options to get associated processes diff --git a/man/make_opts2procs.Rd b/man/mapOption2Process.Rd similarity index 75% rename from man/make_opts2procs.Rd rename to man/mapOption2Process.Rd 
index 07e208b2..ff6905c5 100644 --- a/man/make_opts2procs.Rd +++ b/man/mapOption2Process.Rd @@ -1,15 +1,15 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/assign_job_queue.R -\name{make_opts2procs} -\alias{make_opts2procs} +\name{mapOption2Process} +\alias{mapOption2Process} \title{Construct list where names (MolEvolvR advanced options) point to processes} \usage{ -make_opts2procs() +mapOption2Process() } \value{ list where names (MolEvolvR advanced options) point to processes -example: list_opts2procs <- make_opts2procs +example: list_opts2procs <- mapOption2Process } \description{ Construct list where names (MolEvolvR advanced options) point to processes diff --git a/man/plot_estimated_walltimes.Rd b/man/plotEstimatedWallTimes.Rd similarity index 55% rename from man/plot_estimated_walltimes.Rd rename to man/plotEstimatedWallTimes.Rd index 3669e0e0..0d53cb32 100644 --- a/man/plot_estimated_walltimes.Rd +++ b/man/plotEstimatedWallTimes.Rd @@ -1,18 +1,19 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/assign_job_queue.R -\name{plot_estimated_walltimes} -\alias{plot_estimated_walltimes} +\name{plotEstimatedWallTimes} +\alias{plotEstimatedWallTimes} \title{Plot the estimated runtimes for different advanced options and number of inputs} \usage{ -plot_estimated_walltimes() +plotEstimatedWallTimes() } \value{ line plot object example: -p <- plot_estimated_walltimes() -ggplot2::ggsave(filename = "/data/molevolvr_transfer/molevolvr_dev/molevol_scripts/docs/estimate_walltimes.png", plot = p) +p <- plotEstimatedWallTimes() +ggplot2::ggsave(filename = "/data/molevolvr_transfer/molevolvr_ +dev/molevol_scripts/docs/estimate_walltimes.png", plot = p) } \description{ this function was just for fun; very, very messy code diff --git a/man/write_proc_medians_table.Rd b/man/writeProcessRuntime2TSV.Rd similarity index 77% rename from man/write_proc_medians_table.Rd rename to man/writeProcessRuntime2TSV.Rd index 2ae7a97b..03cbbd68 100644 --- a/man/write_proc_medians_table.Rd +++ b/man/writeProcessRuntime2TSV.Rd @@ -1,10 +1,10 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/assign_job_queue.R -\name{write_proc_medians_table} -\alias{write_proc_medians_table} +\name{writeProcessRuntime2TSV} +\alias{writeProcessRuntime2TSV} \title{Write a table of 2 columns: 1) process and 2) median seconds} \usage{ -write_proc_medians_table(dir_job_results, filepath) +writeProcessRuntime2TSV(dir_job_results, filepath) } \arguments{ \item{dir_job_results}{\link{chr} path to MolEvolvR job_results} @@ -14,7 +14,7 @@ write_proc_medians_table(dir_job_results, filepath) \value{ \link{tbl_df} 2 columns: 1) process and 2) median seconds -example: write_proc_medians_table( +example: writeProcessRuntime2TSV( "/data/scratch/janani/molevolvr_out/", "/data/scratch/janani/molevolvr_out/log_tbl.tsv" ) diff --git a/man/write_proc_medians_yml.Rd b/man/writeProcessRuntimeToYML.Rd similarity index 61% rename from man/write_proc_medians_yml.Rd rename to man/writeProcessRuntimeToYML.Rd index a3d8ee5f..e4a5c8ad 100644 --- a/man/write_proc_medians_yml.Rd +++ b/man/writeProcessRuntimeToYML.Rd @@ -1,25 +1,26 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/assign_job_queue.R -\name{write_proc_medians_yml} -\alias{write_proc_medians_yml} +\name{writeProcessRuntimeToYML} +\alias{writeProcessRuntimeToYML} \title{Compute median process runtimes, then write a YAML list of the processes and their median runtimes in seconds to the 
path specified by 'filepath'.} \usage{ -write_proc_medians_yml(dir_job_results, filepath = NULL) +writeProcessRuntimeToYML(dir_job_results, filepath = NULL) } \arguments{ \item{dir_job_results}{\link{chr} path to MolEvolvR job_results directory} -\item{filepath}{\link{chr} path to save YAML file; if NULL, uses ./molevol_scripts/log_data/job_proc_weights.yml} +\item{filepath}{\link{chr} path to save YAML file; if NULL, +uses ./molevol_scripts/log_data/job_proc_weights.yml} } \description{ The default value of filepath is the value of the env var -MOLEVOLVR_PROC_WEIGHTS, which get_proc_weights() also uses as its default +MOLEVOLVR_PROC_WEIGHTS, which writeProcessRuntimeToYML() also uses as its default read location. } \examples{ \dontrun{ -write_proc_medians_yml( +writeProcessRuntimeToYML( "/data/scratch/janani/molevolvr_out/", "/data/scratch/janani/molevolvr_out/log_tbl.yml" ) From 091d32ebb31b6f295268b4e0a38ef0fab1066358 Mon Sep 17 00:00:00 2001 From: Seyi Kuforiji Date: Tue, 8 Oct 2024 07:17:56 +0100 Subject: [PATCH 11/41] fixing merge issue in NAMESPACE --- NAMESPACE | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/NAMESPACE b/NAMESPACE index 739c76d7..d2ef5463 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -29,6 +29,9 @@ export(cleanSpecies) export(combineFiles) export(combineFullAnalysis) export(combineIPR) +export(condenseRepeatedDomains) +export(convert2TitleCase) +export(convertAlignment2FA) export(convert_aln2fa) export(convert_fa2tre) export(count_bycol) @@ -63,13 +66,15 @@ export(lineage.domain_repeats.plot) export(lineage.neighbors.plot) export(lineage_sunburst) export(make_job_results_url) +export(mapAcc2Name) export(mapAdvOption2Process) export(mapOption2Process) -export(mapAcc2Name) +export(map_acc2name) export(msa_pdf) export(pick_longer_duplicate) export(plotEstimatedWallTimes) export(prot2tax) +export(prot2tax_old) export(removeAsterisks) export(removeEmptyRows) export(removeTails) From fc63187c4985d8a9fad15582691b4ee4f9c273e6 Mon Sep 17 00:00:00 2001 From: Seyi Kuforiji Date: Tue, 8 Oct 2024 08:18:42 +0100 Subject: [PATCH 12/41] Added updated function name to NAMESPACE and removed unused argument in readAAStringSet --- NAMESPACE | 3 +-- R/msa.R | 24 ++++++++++++------------ 2 files changed, 13 insertions(+), 14 deletions(-) diff --git a/NAMESPACE b/NAMESPACE index d2ef5463..cd135cc8 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -20,9 +20,9 @@ export(assert_count_df) export(assignJobQueue) export(calculateEstimatedWallTimeFromOptions) export(calculateProcessRuntime) -export(cleanGeneDescription) export(cleanClusters) export(cleanDomainArchitecture) +export(cleanGeneDescription) export(cleanGenomicContext) export(cleanLineage) export(cleanSpecies) @@ -71,7 +71,6 @@ export(mapAdvOption2Process) export(mapOption2Process) export(map_acc2name) export(msa_pdf) -export(pick_longer_duplicate) export(plotEstimatedWallTimes) export(prot2tax) export(prot2tax_old) diff --git a/R/msa.R b/R/msa.R index e56cc32c..0b1b6e34 100644 --- a/R/msa.R +++ b/R/msa.R @@ -197,21 +197,21 @@ msa_pdf <- function(fasta_path, out_path = NULL, #' #' @examples generate_msa <- function(fa_file = "", outfile = "") { - prot_aa <- readAAStringSet( - path = fa_file, - format = "fasta" - ) - prot_aa + prot_aa <- readAAStringSet( + fa_file, + format = "fasta" + ) + prot_aa - ## Install kalign ?rMSA_INSTALL - ## Messed up! Reimplement from kalign.R - ## https://github.com/mhahsler/rMSA/blob/master/R/kalign.R + ## Install kalign ?rMSA_INSTALL + ## Messed up! 
Reimplement from kalign.R + ## https://github.com/mhahsler/rMSA/blob/master/R/kalign.R - # source("scripts/c2r.R") + # source("scripts/c2r.R") - ## align the sequences - al <- kalign(prot_aa) # !! won't work! - al + ## align the sequences + al <- kalign(prot_aa) # !! won't work! + al } ############################ From 208b9e02d0bedfd6d16d663dfb109fcce23040ac Mon Sep 17 00:00:00 2001 From: teddyCodex Date: Tue, 8 Oct 2024 18:42:40 +0100 Subject: [PATCH 13/41] refactor function names in R/ipr2vis.R --- R/ipr2viz.R | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/R/ipr2viz.R b/R/ipr2viz.R index bf3650f7..5d8a0a03 100644 --- a/R/ipr2viz.R +++ b/R/ipr2viz.R @@ -13,7 +13,7 @@ ################################# ## Modified gggenes::theme_genes ################################# -## theme_genes2 adapted from theme_genes (w/o strip.text()) +## themeGenes2 adapted from theme_genes (w/o strip.text()) ## https://github.com/wilkox/gggenes/blob/master/R/theme_genes.R #' Theme Genes2 #' @@ -23,7 +23,7 @@ #' @export #' #' @examples -theme_genes2 <- function() { +themeGenes2 <- function() { ggplot2::theme_grey() + ggplot2::theme( panel.background = ggplot2::element_blank(), panel.grid.major.y = ggplot2::element_line(colour = "grey80", size = 0.2), @@ -58,7 +58,7 @@ theme_genes2 <- function() { #' @export #' #' @examples -find_top_acc <- function(infile_full, +getTopAccByLinDomArch <- function(infile_full, DA_col = "DomArch.Pfam", lin_col = "Lineage_short", n = 20, @@ -113,7 +113,7 @@ find_top_acc <- function(infile_full, #' @export #' #' @examples -ipr2viz <- function(infile_ipr = NULL, infile_full = NULL, accessions = c(), +plotIPR2Viz <- function(infile_ipr = NULL, infile_full = NULL, accessions = c(), analysis = c("Pfam", "Phobius", "TMHMM", "Gene3D"), group_by = "Analysis", # "Analysis" topn = 20, name = "Name", text_size = 15, query = "All") { @@ -141,8 +141,8 @@ ipr2viz <- function(infile_ipr = NULL, infile_full = NULL, accessions = c(), ## To filter by Analysis analysis <- paste(analysis, collapse = "|") ## @SAM: This can't be set in stone since the analysis may change! - ## Getting top n accession numbers using find_top_acc() - top_acc <- find_top_acc( + ## Getting top n accession numbers using getTopAccByLinDomArch() + top_acc <- getTopAccByLinDomArch( infile_full = infile_full, DA_col = "DomArch.Pfam", ## @SAM, you could pick by the Analysis w/ max rows! 
@@ -202,7 +202,7 @@ ipr2viz <- function(infile_ipr = NULL, infile_full = NULL, accessions = c(), # , ncol = 1 + #scales = "free", scale_fill_manual(values = CPCOLS, na.value = "#A9A9A9") + theme_minimal() + - theme_genes2() + + themeGenes2() + theme( legend.position = "bottom", legend.box = "horizontal", @@ -232,7 +232,7 @@ ipr2viz <- function(infile_ipr = NULL, infile_full = NULL, accessions = c(), ) + scale_fill_manual(values = CPCOLS, na.value = "#A9A9A9") + theme_minimal() + - theme_genes2() + + themeGenes2() + theme( legend.position = "bottom", legend.box = "horizontal", @@ -268,7 +268,7 @@ ipr2viz <- function(infile_ipr = NULL, infile_full = NULL, accessions = c(), #' @export #' #' @examples -ipr2viz_web <- function(infile_ipr, +plotIPR2VizWeb <- function(infile_ipr, accessions, analysis = c("Pfam", "Phobius", "TMHMM", "Gene3D"), group_by = "Analysis", name = "Name", @@ -344,7 +344,7 @@ ipr2viz_web <- function(infile_ipr, # , ncol = 1 + #scales = "free", scale_fill_manual(values = CPCOLS, na.value = "#A9A9A9") + theme_minimal() + - theme_genes2() + + themeGenes2() + theme( legend.position = "bottom", legend.box = "horizontal", @@ -374,7 +374,7 @@ ipr2viz_web <- function(infile_ipr, ) + scale_fill_manual(values = CPCOLS, na.value = "#A9A9A9") + theme_minimal() + - theme_genes2() + + themeGenes2() + theme( legend.position = "bottom", legend.box = "horizontal", From 44f0a766f29b36cdab6d7fbddc9c31cd4d0df20d Mon Sep 17 00:00:00 2001 From: teddyCodex Date: Tue, 8 Oct 2024 18:51:23 +0100 Subject: [PATCH 14/41] update namespace and rd files with roxygen2 --- NAMESPACE | 8 ++-- man/countbycolumn.Rd | 22 ---------- man/filterbydomains.Rd | 44 ------------------- man/filterbyfrequency.Rd | 22 ---------- man/findparalogs.Rd | 26 ----------- ...nd_top_acc.Rd => getTopAccByLinDomArch.Rd} | 6 +-- man/{ipr2viz.Rd => plotIPR2Viz.Rd} | 6 +-- man/{ipr2viz_web.Rd => plotIPR2VizWeb.Rd} | 6 +-- man/summarizebylineage.Rd | 25 ----------- man/{theme_genes2.Rd => themeGenes2.Rd} | 6 +-- man/totalgencontextordomarchcounts.Rd | 42 ------------------ man/words2wordcounts.Rd | 25 ----------- 12 files changed, 16 insertions(+), 222 deletions(-) delete mode 100644 man/countbycolumn.Rd delete mode 100644 man/filterbydomains.Rd delete mode 100644 man/filterbyfrequency.Rd delete mode 100644 man/findparalogs.Rd rename man/{find_top_acc.Rd => getTopAccByLinDomArch.Rd} (79%) rename man/{ipr2viz.Rd => plotIPR2Viz.Rd} (87%) rename man/{ipr2viz_web.Rd => plotIPR2VizWeb.Rd} (85%) delete mode 100644 man/summarizebylineage.Rd rename man/{theme_genes2.Rd => themeGenes2.Rd} (72%) delete mode 100644 man/totalgencontextordomarchcounts.Rd delete mode 100644 man/words2wordcounts.Rd diff --git a/NAMESPACE b/NAMESPACE index 53332439..ddbd1dd5 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -46,22 +46,22 @@ export(extractAccNum) export(filterByDomains) export(filterByFrequency) export(findParalogs) -export(find_top_acc) export(formatJobArgumentsHTML) export(gc_undirected_network) export(generateAllAlignments2FA) export(generate_all_aln2fa) export(generate_msa) +export(getTopAccByLinDomArch) export(get_accnums_from_fasta_file) export(get_proc_medians) export(get_proc_weights) -export(ipr2viz) -export(ipr2viz_web) export(make_opts2procs) export(mapAcc2Name) export(map_acc2name) export(map_advanced_opts2procs) export(msa_pdf) +export(plotIPR2Viz) +export(plotIPR2VizWeb) export(plotLineageDA) export(plotLineageDomainRepeats) export(plotLineageHeatmap) @@ -97,7 +97,7 @@ export(summarizeDomArch_ByLineage) export(summarizeGenContext) 
export(summarizeGenContext_ByDomArchLineage) export(summarizeGenContext_ByLineage) -export(theme_genes2) +export(themeGenes2) export(to_titlecase) export(totalGenContextOrDomArchCounts) export(validateCountDF) diff --git a/man/countbycolumn.Rd b/man/countbycolumn.Rd deleted file mode 100644 index 34fcc3e0..00000000 --- a/man/countbycolumn.Rd +++ /dev/null @@ -1,22 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/summarize.R -\name{countByColumn} -\alias{countByColumn} -\title{Count By Column} -\usage{ -countByColumn(prot = prot, column = "DomArch", min.freq = 1) -} -\arguments{ -\item{min.freq}{} -} -\value{ -Describe return, in detail -} -\description{ -Count By Column -} -\examples{ -\dontrun{ -countByColumn() -} -} diff --git a/man/filterbydomains.Rd b/man/filterbydomains.Rd deleted file mode 100644 index 8c885363..00000000 --- a/man/filterbydomains.Rd +++ /dev/null @@ -1,44 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/summarize.R -\name{filterByDomains} -\alias{filterByDomains} -\title{Filter by Domains} -\usage{ -filterByDomains( - prot, - column = "DomArch", - doms_keep = c(), - doms_remove = c(), - ignore.case = FALSE -) -} -\arguments{ -\item{prot}{Dataframe to filter} - -\item{column}{Column to search for domains in (DomArch column)} - -\item{doms_keep}{Vector of domains that must be identified within column in order for -observation to be kept} - -\item{doms_remove}{Vector of domains that, if found within an observation, will be removed} - -\item{ignore.case}{Should the matching be non case sensitive} -} -\value{ -Filtered data frame -} -\description{ -filterByDomains filters a data frame by identifying exact domain matches -and either keeping or removing rows with the identified domain -} -\note{ -There is no need to make the domains 'regex safe', that will be handled by this function -} -\examples{ -\dontrun{ -filterByDomains() -} -} -\author{ -Samuel Chen, Janani Ravi -} diff --git a/man/filterbyfrequency.Rd b/man/filterbyfrequency.Rd deleted file mode 100644 index d2c5f9cd..00000000 --- a/man/filterbyfrequency.Rd +++ /dev/null @@ -1,22 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/summarize.R -\name{filterByFrequency} -\alias{filterByFrequency} -\title{Filter Frequency} -\usage{ -filterByFrequency(x, min.freq) -} -\arguments{ -\item{min.freq}{} -} -\value{ -Describe return, in detail -} -\description{ -Filter Frequency -} -\examples{ -\dontrun{ -filterByFrequency() -} -} diff --git a/man/findparalogs.Rd b/man/findparalogs.Rd deleted file mode 100644 index 4b5edbcf..00000000 --- a/man/findparalogs.Rd +++ /dev/null @@ -1,26 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/summarize.R -\name{findParalogs} -\alias{findParalogs} -\title{Find Paralogs} -\usage{ -findParalogs(prot) -} -\arguments{ -\item{prot}{A data frame filtered by a Query, containing columns Species and Lineage} -} -\value{ -returns a dataframe containing paralogs and the counts. -} -\description{ -Creates a data frame of paralogs. -} -\note{ -Please refer to the source code if you have alternate file formats and/or -column names. 
-} -\examples{ -\dontrun{ -findParalogs(pspa) -} -} diff --git a/man/find_top_acc.Rd b/man/getTopAccByLinDomArch.Rd similarity index 79% rename from man/find_top_acc.Rd rename to man/getTopAccByLinDomArch.Rd index 780cde11..a00da5c7 100644 --- a/man/find_top_acc.Rd +++ b/man/getTopAccByLinDomArch.Rd @@ -1,10 +1,10 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/ipr2viz.R -\name{find_top_acc} -\alias{find_top_acc} +\name{getTopAccByLinDomArch} +\alias{getTopAccByLinDomArch} \title{Group by lineage + DA then take top 20} \usage{ -find_top_acc( +getTopAccByLinDomArch( infile_full, DA_col = "DomArch.Pfam", lin_col = "Lineage_short", diff --git a/man/ipr2viz.Rd b/man/plotIPR2Viz.Rd similarity index 87% rename from man/ipr2viz.Rd rename to man/plotIPR2Viz.Rd index 79063497..22297312 100644 --- a/man/ipr2viz.Rd +++ b/man/plotIPR2Viz.Rd @@ -1,10 +1,10 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/ipr2viz.R -\name{ipr2viz} -\alias{ipr2viz} +\name{plotIPR2Viz} +\alias{plotIPR2Viz} \title{IPR2Viz} \usage{ -ipr2viz( +plotIPR2Viz( infile_ipr = NULL, infile_full = NULL, accessions = c(), diff --git a/man/ipr2viz_web.Rd b/man/plotIPR2VizWeb.Rd similarity index 85% rename from man/ipr2viz_web.Rd rename to man/plotIPR2VizWeb.Rd index 896445bd..4b4394ad 100644 --- a/man/ipr2viz_web.Rd +++ b/man/plotIPR2VizWeb.Rd @@ -1,10 +1,10 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/ipr2viz.R -\name{ipr2viz_web} -\alias{ipr2viz_web} +\name{plotIPR2VizWeb} +\alias{plotIPR2VizWeb} \title{IPR2Viz Web} \usage{ -ipr2viz_web( +plotIPR2VizWeb( infile_ipr, accessions, analysis = c("Pfam", "Phobius", "TMHMM", "Gene3D"), diff --git a/man/summarizebylineage.Rd b/man/summarizebylineage.Rd deleted file mode 100644 index 2e445913..00000000 --- a/man/summarizebylineage.Rd +++ /dev/null @@ -1,25 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/summarize.R -\name{summarizeByLineage} -\alias{summarizeByLineage} -\title{Summarize by Lineage} -\usage{ -summarizeByLineage(prot = "prot", column = "DomArch", by = "Lineage", query) -} -\arguments{ -\item{query}{} -} -\value{ -Describe return, in detail -} -\description{ -Summarize by Lineage -} -\examples{ -\dontrun{ -library(tidyverse) -tibble(DomArch = c("a+b", "a+b", "b+c", "a+b"), Lineage = c("l1", "l1", "l1", "l2")) |> - summarizeByLineage(query = "all") -} - -} diff --git a/man/theme_genes2.Rd b/man/themeGenes2.Rd similarity index 72% rename from man/theme_genes2.Rd rename to man/themeGenes2.Rd index 29f79673..1553e019 100644 --- a/man/theme_genes2.Rd +++ b/man/themeGenes2.Rd @@ -1,10 +1,10 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/ipr2viz.R -\name{theme_genes2} -\alias{theme_genes2} +\name{themeGenes2} +\alias{themeGenes2} \title{Theme Genes2} \usage{ -theme_genes2() +themeGenes2() } \description{ Theme Genes2 diff --git a/man/totalgencontextordomarchcounts.Rd b/man/totalgencontextordomarchcounts.Rd deleted file mode 100644 index f457cb6a..00000000 --- a/man/totalgencontextordomarchcounts.Rd +++ /dev/null @@ -1,42 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/summarize.R -\name{totalGenContextOrDomArchCounts} -\alias{totalGenContextOrDomArchCounts} -\title{Total Counts} -\usage{ -totalGenContextOrDomArchCounts( - prot, - column = "DomArch", - lineage_col = "Lineage", - cutoff = 90, - RowsCutoff = FALSE, - digits = 2 -) -} -\arguments{ -\item{prot}{A data frame that 
must contain columns: -\itemize{\item Either 'GenContext' or 'DomArch.norep' \item count}} - -\item{column}{Character. The column to summarize} - -\item{cutoff}{Numeric. Cutoff for total count. Counts below cutoff value will not be shown. Default is 0.} - -\item{digits}{} -} -\value{ -Define return, in detail -} -\description{ -Creates a data frame with a totalcount column - -This function is designed to sum the counts column by either Genomic Context or Domain Architecture and creates a totalcount column from those sums. -} -\note{ -Please refer to the source code if you have alternate file formats and/or -column names. -} -\examples{ -\dontrun{ -totalGenContextOrDomArchCounts(pspa - gc_lin_counts, 0, "GC") -} -} diff --git a/man/words2wordcounts.Rd b/man/words2wordcounts.Rd deleted file mode 100644 index 7f60f226..00000000 --- a/man/words2wordcounts.Rd +++ /dev/null @@ -1,25 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/summarize.R -\name{words2WordCounts} -\alias{words2WordCounts} -\title{Words 2 Word Counts} -\usage{ -words2WordCounts(string) -} -\arguments{ -\item{string}{} -} -\value{ -\link{tbl_df} table with 2 columns: 1) words & 2) counts/frequency -} -\description{ -Get word counts (wc) \link{DOMAINS (DA) or DOMAIN ARCHITECTURES (GC)} -} -\examples{ -\dontrun{ -tibble::tibble(DomArch = c("aaa+bbb", "a+b", "b+c", "b-c")) |> - elements2Words() |> - words2WordCounts() -} - -} From a246339f47d37ff60bdfb76a6861807b546c93f3 Mon Sep 17 00:00:00 2001 From: teddyCodex Date: Tue, 8 Oct 2024 23:36:16 +0100 Subject: [PATCH 15/41] refactor function names in R/pre-msa-tree and R/reverse-operons.R --- NAMESPACE | 7 ++- R/pre-msa-tree.R | 16 +++---- R/reverse_operons.R | 12 ++--- man/RepresentativeAccNums.Rd | 4 +- man/countbycolumn.Rd | 22 ---------- man/createRepresentativeAccNum.Rd | 27 ++++++++++++ man/filterbydomains.Rd | 44 ------------------- man/filterbyfrequency.Rd | 22 ---------- man/findparalogs.Rd | 26 ----------- man/getAccNumFromFA.Rd | 14 ++++++ man/get_accnums_from_fasta_file.Rd | 6 +-- man/{reveql.Rd => reverseOperonSeq.Rd} | 10 ++--- ...verse_operon.Rd => straightenOperonSeq.Rd} | 10 ++--- man/summarizebylineage.Rd | 25 ----------- man/totalgencontextordomarchcounts.Rd | 42 ------------------ man/words2wordcounts.Rd | 25 ----------- man/write.MsaAAMultipleAlignment.Rd | 8 +--- man/writeMSA_AA2FA.Rd | 21 +++++++++ 18 files changed, 94 insertions(+), 247 deletions(-) delete mode 100644 man/countbycolumn.Rd create mode 100644 man/createRepresentativeAccNum.Rd delete mode 100644 man/filterbydomains.Rd delete mode 100644 man/filterbyfrequency.Rd delete mode 100644 man/findparalogs.Rd create mode 100644 man/getAccNumFromFA.Rd rename man/{reveql.Rd => reverseOperonSeq.Rd} (56%) rename man/{reverse_operon.Rd => straightenOperonSeq.Rd} (53%) delete mode 100644 man/summarizebylineage.Rd delete mode 100644 man/totalgencontextordomarchcounts.Rd delete mode 100644 man/words2wordcounts.Rd create mode 100644 man/writeMSA_AA2FA.Rd diff --git a/NAMESPACE b/NAMESPACE index 53332439..fe2ad999 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -36,6 +36,7 @@ export(countByColumn) export(createFA2Tree) export(createJobResultsURL) export(createJobStatusEmailMessage) +export(createRepresentativeAccNum) export(createWordCloud2Element) export(createWordCloudElement) export(create_lineage_lookup) @@ -52,6 +53,7 @@ export(gc_undirected_network) export(generateAllAlignments2FA) export(generate_all_aln2fa) export(generate_msa) +export(getAccNumFromFA) 
export(get_accnums_from_fasta_file) export(get_proc_medians) export(get_proc_weights) @@ -83,14 +85,14 @@ export(removeTails) export(renameFA) export(rename_fasta) export(replaceQuestionMarks) -export(reveql) -export(reverse_operon) +export(reverseOperonSeq) export(run_deltablast) export(run_rpsblast) export(selectLongestDuplicate) export(sendJobStatusEmail) export(shortenLineage) export(sinkReset) +export(straightenOperonSeq) export(summarizeByLineage) export(summarizeDomArch) export(summarizeDomArch_ByLineage) @@ -103,6 +105,7 @@ export(totalGenContextOrDomArchCounts) export(validateCountDF) export(wordcloud3) export(write.MsaAAMultipleAlignment) +export(writeMSA_AA2FA) export(write_proc_medians_table) export(write_proc_medians_yml) importFrom(Biostrings,AAStringSet) diff --git a/R/pre-msa-tree.R b/R/pre-msa-tree.R index 44979c3c..fed495f4 100644 --- a/R/pre-msa-tree.R +++ b/R/pre-msa-tree.R @@ -546,7 +546,7 @@ acc2fa <- function(accessions, outpath, plan = "sequential") { return(result) } -#' RepresentativeAccNums +#' createRepresentativeAccNum #' #' @description #' Function to generate a vector of one Accession number per distinct observation from 'reduced' column @@ -566,7 +566,7 @@ acc2fa <- function(accessions, outpath, plan = "sequential") { #' @export #' #' @examples -RepresentativeAccNums <- function(prot_data, +createRepresentativeAccNum <- function(prot_data, reduced = "Lineage", accnum_col = "AccNum") { # Get Unique reduced column and then bind the AccNums back to get one AccNum per reduced column @@ -623,15 +623,15 @@ alignFasta <- function(fasta_file, tool = "Muscle", outpath = NULL) { ) if (typeof(outpath) == "character") { - write.MsaAAMultipleAlignment(aligned, outpath) + writeMSA_AA2FA(aligned, outpath) } return(aligned) } -#' write.MsaAAMultipleAlignment +#' writeMSA_AA2FA #' #' @description -#' Write MsaAAMultpleAlignment Objects as algined fasta sequence +#' Write MsaAAMultpleAlignment Objects as aligned fasta sequence #' MsaAAMultipleAlignment Objects are generated from calls to msaClustalOmega #' and msaMuscle from the 'msa' package #' @@ -647,7 +647,7 @@ alignFasta <- function(fasta_file, tool = "Muscle", outpath = NULL) { #' @export #' #' @examples -write.MsaAAMultipleAlignment <- function(alignment, outpath) { +writeMSA_AA2FA <- function(alignment, outpath) { l <- length(rownames(alignment)) fasta <- "" for (i in 1:l) @@ -660,7 +660,7 @@ write.MsaAAMultipleAlignment <- function(alignment, outpath) { return(fasta) } -#' get_accnums_from_fasta_file +#' getAccNumFromFA #' #' @param fasta_file #' @@ -671,7 +671,7 @@ write.MsaAAMultipleAlignment <- function(alignment, outpath) { #' @export #' #' @examples -get_accnums_from_fasta_file <- function(fasta_file) { +getAccNumFromFA <- function(fasta_file) { txt <- read_file(fasta_file) accnums <- stringi::stri_extract_all_regex(fasta_file, "(?<=>)[\\w,.]+")[[1]] return(accnums) diff --git a/R/reverse_operons.R b/R/reverse_operons.R index e4bbd50e..a2570e8d 100755 --- a/R/reverse_operons.R +++ b/R/reverse_operons.R @@ -3,7 +3,7 @@ # Modified by Janani Ravi and Samuel Chen -#' reveql +#' straightenOperonSeq #' #' @param prot #' @@ -11,7 +11,7 @@ #' @export #' #' @examples -reveql <- function(prot) { +straightenOperonSeq <- function(prot) { w <- prot # $GenContext.orig # was 'x' y <- rep(NA, length(w)) @@ -57,7 +57,7 @@ reveql <- function(prot) { ## The function to reverse operons -#' reverse_operon +#' reverseOperonSeq #' #' @param prot #' @@ -65,7 +65,7 @@ reveql <- function(prot) { #' @export #' #' @examples -reverse_operon 
<- function(prot) { +reverseOperonSeq <- function(prot) { gencontext <- prot$GenContext gencontext <- gsub(pattern = ">", replacement = ">|", x = gencontext) @@ -108,7 +108,7 @@ reverse_operon <- function(prot) { - ge <- lapply(1:length(ge), function(x) reveql(ge[[x]])) + ge <- lapply(1:length(ge), function(x) straightenOperonSeq(ge[[x]])) ye <- te[withouteq] @@ -141,4 +141,4 @@ reverse_operon <- function(prot) { # colnames(prot) <- c("AccNum","GenContext.orig","len", "GeneName","TaxID","Species") ## ??? straighten operons -# prot$GenContext.orig <- reverse_operon(prot) +# prot$GenContext.orig <- reverseOperonSeq(prot) diff --git a/man/RepresentativeAccNums.Rd b/man/RepresentativeAccNums.Rd index f617cde4..57d1f1ab 100644 --- a/man/RepresentativeAccNums.Rd +++ b/man/RepresentativeAccNums.Rd @@ -1,11 +1,9 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/CHANGED-pre-msa-tree.R, R/pre-msa-tree.R +% Please edit documentation in R/CHANGED-pre-msa-tree.R \name{RepresentativeAccNums} \alias{RepresentativeAccNums} \title{Function to generate a vector of one Accession number per distinct observation from 'reduced' column} \usage{ -RepresentativeAccNums(prot_data, reduced = "Lineage", accnum_col = "AccNum") - RepresentativeAccNums(prot_data, reduced = "Lineage", accnum_col = "AccNum") } \arguments{ diff --git a/man/countbycolumn.Rd b/man/countbycolumn.Rd deleted file mode 100644 index 34fcc3e0..00000000 --- a/man/countbycolumn.Rd +++ /dev/null @@ -1,22 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/summarize.R -\name{countByColumn} -\alias{countByColumn} -\title{Count By Column} -\usage{ -countByColumn(prot = prot, column = "DomArch", min.freq = 1) -} -\arguments{ -\item{min.freq}{} -} -\value{ -Describe return, in detail -} -\description{ -Count By Column -} -\examples{ -\dontrun{ -countByColumn() -} -} diff --git a/man/createRepresentativeAccNum.Rd b/man/createRepresentativeAccNum.Rd new file mode 100644 index 00000000..3703fe1a --- /dev/null +++ b/man/createRepresentativeAccNum.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/pre-msa-tree.R +\name{createRepresentativeAccNum} +\alias{createRepresentativeAccNum} +\title{createRepresentativeAccNum} +\usage{ +createRepresentativeAccNum( + prot_data, + reduced = "Lineage", + accnum_col = "AccNum" +) +} +\arguments{ +\item{prot_data}{Data frame containing Accession Numbers} + +\item{reduced}{Column from prot_data from which distinct observations +will be generated from. 
+One accession number will be assigned for each of these observations} + +\item{accnum_col}{Column from prot_data that contains Accession Numbers} +} +\description{ +Function to generate a vector of one Accession number per distinct observation from 'reduced' column +} +\author{ +Samuel Chen, Janani Ravi +} diff --git a/man/filterbydomains.Rd b/man/filterbydomains.Rd deleted file mode 100644 index 8c885363..00000000 --- a/man/filterbydomains.Rd +++ /dev/null @@ -1,44 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/summarize.R -\name{filterByDomains} -\alias{filterByDomains} -\title{Filter by Domains} -\usage{ -filterByDomains( - prot, - column = "DomArch", - doms_keep = c(), - doms_remove = c(), - ignore.case = FALSE -) -} -\arguments{ -\item{prot}{Dataframe to filter} - -\item{column}{Column to search for domains in (DomArch column)} - -\item{doms_keep}{Vector of domains that must be identified within column in order for -observation to be kept} - -\item{doms_remove}{Vector of domains that, if found within an observation, will be removed} - -\item{ignore.case}{Should the matching be non case sensitive} -} -\value{ -Filtered data frame -} -\description{ -filterByDomains filters a data frame by identifying exact domain matches -and either keeping or removing rows with the identified domain -} -\note{ -There is no need to make the domains 'regex safe', that will be handled by this function -} -\examples{ -\dontrun{ -filterByDomains() -} -} -\author{ -Samuel Chen, Janani Ravi -} diff --git a/man/filterbyfrequency.Rd b/man/filterbyfrequency.Rd deleted file mode 100644 index d2c5f9cd..00000000 --- a/man/filterbyfrequency.Rd +++ /dev/null @@ -1,22 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/summarize.R -\name{filterByFrequency} -\alias{filterByFrequency} -\title{Filter Frequency} -\usage{ -filterByFrequency(x, min.freq) -} -\arguments{ -\item{min.freq}{} -} -\value{ -Describe return, in detail -} -\description{ -Filter Frequency -} -\examples{ -\dontrun{ -filterByFrequency() -} -} diff --git a/man/findparalogs.Rd b/man/findparalogs.Rd deleted file mode 100644 index 4b5edbcf..00000000 --- a/man/findparalogs.Rd +++ /dev/null @@ -1,26 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/summarize.R -\name{findParalogs} -\alias{findParalogs} -\title{Find Paralogs} -\usage{ -findParalogs(prot) -} -\arguments{ -\item{prot}{A data frame filtered by a Query, containing columns Species and Lineage} -} -\value{ -returns a dataframe containing paralogs and the counts. -} -\description{ -Creates a data frame of paralogs. -} -\note{ -Please refer to the source code if you have alternate file formats and/or -column names. 
-} -\examples{ -\dontrun{ -findParalogs(pspa) -} -} diff --git a/man/getAccNumFromFA.Rd b/man/getAccNumFromFA.Rd new file mode 100644 index 00000000..f2409965 --- /dev/null +++ b/man/getAccNumFromFA.Rd @@ -0,0 +1,14 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/pre-msa-tree.R +\name{getAccNumFromFA} +\alias{getAccNumFromFA} +\title{getAccNumFromFA} +\usage{ +getAccNumFromFA(fasta_file) +} +\arguments{ +\item{fasta_file}{} +} +\description{ +getAccNumFromFA +} diff --git a/man/get_accnums_from_fasta_file.Rd b/man/get_accnums_from_fasta_file.Rd index 84c163cc..f545d1a0 100644 --- a/man/get_accnums_from_fasta_file.Rd +++ b/man/get_accnums_from_fasta_file.Rd @@ -1,11 +1,9 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/CHANGED-pre-msa-tree.R, R/pre-msa-tree.R +% Please edit documentation in R/CHANGED-pre-msa-tree.R \name{get_accnums_from_fasta_file} \alias{get_accnums_from_fasta_file} \title{Get accnums from fasta file} \usage{ -get_accnums_from_fasta_file(fasta_file) - get_accnums_from_fasta_file(fasta_file) } \arguments{ @@ -13,6 +11,4 @@ get_accnums_from_fasta_file(fasta_file) } \description{ Get accnums from fasta file - -get_accnums_from_fasta_file } diff --git a/man/reveql.Rd b/man/reverseOperonSeq.Rd similarity index 56% rename from man/reveql.Rd rename to man/reverseOperonSeq.Rd index 9dc2bcb8..d61ec5f2 100644 --- a/man/reveql.Rd +++ b/man/reverseOperonSeq.Rd @@ -1,14 +1,14 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/reverse_operons.R -\name{reveql} -\alias{reveql} -\title{reveql} +\name{reverseOperonSeq} +\alias{reverseOperonSeq} +\title{reverseOperonSeq} \usage{ -reveql(prot) +reverseOperonSeq(prot) } \arguments{ \item{prot}{} } \description{ -reveql +reverseOperonSeq } diff --git a/man/reverse_operon.Rd b/man/straightenOperonSeq.Rd similarity index 53% rename from man/reverse_operon.Rd rename to man/straightenOperonSeq.Rd index 270e2a62..fcd0c923 100644 --- a/man/reverse_operon.Rd +++ b/man/straightenOperonSeq.Rd @@ -1,14 +1,14 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/reverse_operons.R -\name{reverse_operon} -\alias{reverse_operon} -\title{reverse_operon} +\name{straightenOperonSeq} +\alias{straightenOperonSeq} +\title{straightenOperonSeq} \usage{ -reverse_operon(prot) +straightenOperonSeq(prot) } \arguments{ \item{prot}{} } \description{ -reverse_operon +straightenOperonSeq } diff --git a/man/summarizebylineage.Rd b/man/summarizebylineage.Rd deleted file mode 100644 index 2e445913..00000000 --- a/man/summarizebylineage.Rd +++ /dev/null @@ -1,25 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/summarize.R -\name{summarizeByLineage} -\alias{summarizeByLineage} -\title{Summarize by Lineage} -\usage{ -summarizeByLineage(prot = "prot", column = "DomArch", by = "Lineage", query) -} -\arguments{ -\item{query}{} -} -\value{ -Describe return, in detail -} -\description{ -Summarize by Lineage -} -\examples{ -\dontrun{ -library(tidyverse) -tibble(DomArch = c("a+b", "a+b", "b+c", "a+b"), Lineage = c("l1", "l1", "l1", "l2")) |> - summarizeByLineage(query = "all") -} - -} diff --git a/man/totalgencontextordomarchcounts.Rd b/man/totalgencontextordomarchcounts.Rd deleted file mode 100644 index f457cb6a..00000000 --- a/man/totalgencontextordomarchcounts.Rd +++ /dev/null @@ -1,42 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/summarize.R -\name{totalGenContextOrDomArchCounts} 
-\alias{totalGenContextOrDomArchCounts} -\title{Total Counts} -\usage{ -totalGenContextOrDomArchCounts( - prot, - column = "DomArch", - lineage_col = "Lineage", - cutoff = 90, - RowsCutoff = FALSE, - digits = 2 -) -} -\arguments{ -\item{prot}{A data frame that must contain columns: -\itemize{\item Either 'GenContext' or 'DomArch.norep' \item count}} - -\item{column}{Character. The column to summarize} - -\item{cutoff}{Numeric. Cutoff for total count. Counts below cutoff value will not be shown. Default is 0.} - -\item{digits}{} -} -\value{ -Define return, in detail -} -\description{ -Creates a data frame with a totalcount column - -This function is designed to sum the counts column by either Genomic Context or Domain Architecture and creates a totalcount column from those sums. -} -\note{ -Please refer to the source code if you have alternate file formats and/or -column names. -} -\examples{ -\dontrun{ -totalGenContextOrDomArchCounts(pspa - gc_lin_counts, 0, "GC") -} -} diff --git a/man/words2wordcounts.Rd b/man/words2wordcounts.Rd deleted file mode 100644 index 7f60f226..00000000 --- a/man/words2wordcounts.Rd +++ /dev/null @@ -1,25 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/summarize.R -\name{words2WordCounts} -\alias{words2WordCounts} -\title{Words 2 Word Counts} -\usage{ -words2WordCounts(string) -} -\arguments{ -\item{string}{} -} -\value{ -\link{tbl_df} table with 2 columns: 1) words & 2) counts/frequency -} -\description{ -Get word counts (wc) \link{DOMAINS (DA) or DOMAIN ARCHITECTURES (GC)} -} -\examples{ -\dontrun{ -tibble::tibble(DomArch = c("aaa+bbb", "a+b", "b+c", "b-c")) |> - elements2Words() |> - words2WordCounts() -} - -} diff --git a/man/write.MsaAAMultipleAlignment.Rd b/man/write.MsaAAMultipleAlignment.Rd index 17a05f50..e26f26e7 100644 --- a/man/write.MsaAAMultipleAlignment.Rd +++ b/man/write.MsaAAMultipleAlignment.Rd @@ -1,11 +1,9 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/CHANGED-pre-msa-tree.R, R/pre-msa-tree.R +% Please edit documentation in R/CHANGED-pre-msa-tree.R \name{write.MsaAAMultipleAlignment} \alias{write.MsaAAMultipleAlignment} \title{Write MsaAAMultpleAlignment Objects as algined fasta sequence} \usage{ -write.MsaAAMultipleAlignment(alignment, outpath) - write.MsaAAMultipleAlignment(alignment, outpath) } \arguments{ @@ -16,10 +14,6 @@ write.MsaAAMultipleAlignment(alignment, outpath) \description{ MsaAAMultipleAlignment Objects are generated from calls to msaClustalOmega and msaMuscle from the 'msa' package - -Write MsaAAMultpleAlignment Objects as algined fasta sequence -MsaAAMultipleAlignment Objects are generated from calls to msaClustalOmega -and msaMuscle from the 'msa' package } \author{ Samuel Chen, Janani Ravi diff --git a/man/writeMSA_AA2FA.Rd b/man/writeMSA_AA2FA.Rd new file mode 100644 index 00000000..068e5b63 --- /dev/null +++ b/man/writeMSA_AA2FA.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/pre-msa-tree.R +\name{writeMSA_AA2FA} +\alias{writeMSA_AA2FA} +\title{writeMSA_AA2FA} +\usage{ +writeMSA_AA2FA(alignment, outpath) +} +\arguments{ +\item{alignment}{MsaAAMultipleAlignment object to be written as a fasta} + +\item{outpath}{Where the resulting FASTA file should be written to} +} +\description{ +Write MsaAAMultpleAlignment Objects as aligned fasta sequence +MsaAAMultipleAlignment Objects are generated from calls to msaClustalOmega +and msaMuscle from the 'msa' package +} +\author{ +Samuel Chen, Janani Ravi +} 
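A quick orientation for the renamed pre-MSA helpers in this patch: `alignFasta()` appears to return the `MsaAAMultipleAlignment` object that `writeMSA_AA2FA()` serialises to an aligned FASTA, and `createRepresentativeAccNum()` reduces a protein table to one accession per distinct lineage. The sketch below is illustrative only — it assumes `alignFasta()` is exported alongside the renamed functions and that the renamed exports behave exactly like the snake_case versions they replace; the FASTA paths and the toy `prot_data` table are hypothetical placeholders, not files shipped with the package.

```r
# Sketch only: "my_proteins.fa" and the toy prot_data below are hypothetical,
# and this assumes the renamed exports work like the snake_case originals.
library(MolEvolvR)

# Align the sequences and write the alignment out as FASTA; per the diff,
# alignFasta() already calls writeMSA_AA2FA() when `outpath` is a character.
aln <- alignFasta("my_proteins.fa", tool = "Muscle",
                  outpath = "my_proteins.aln.fa")

# The writer can also be called directly on an MsaAAMultipleAlignment object,
# e.g. when the alignment was produced elsewhere.
writeMSA_AA2FA(aln, outpath = "my_proteins.aln.fa")

# One representative accession per distinct lineage, given a table with
# AccNum and Lineage columns.
prot_data <- data.frame(
  AccNum  = c("ACC_1", "ACC_2", "ACC_3"),
  Lineage = c("Bacteria>Firmicutes", "Bacteria>Firmicutes", "Archaea")
)
reps <- createRepresentativeAccNum(prot_data,
                                   reduced = "Lineage",
                                   accnum_col = "AccNum")
```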
From 38f3cb000ddf35028c1e7c940920dd051db1a2dc Mon Sep 17 00:00:00 2001 From: Seyi Kuforiji Date: Wed, 9 Oct 2024 11:32:03 +0100 Subject: [PATCH 16/41] added error handling functionality for the run_deltablast and run_rpsblast functions. This includes arguments check before wrapping code logic in a tryCatch block. --- R/blastWrappers.R | 109 +++++++++++++++++++++++++++++++++++----------- 1 file changed, 84 insertions(+), 25 deletions(-) diff --git a/R/blastWrappers.R b/R/blastWrappers.R index 552b1ff6..15484a1b 100755 --- a/R/blastWrappers.R +++ b/R/blastWrappers.R @@ -18,25 +18,56 @@ #' #' @examples run_deltablast <- function(deltablast_path, db_search_path, - db = "refseq", query, evalue = "1e-5", - out, num_alignments, num_threads = 1) { - start <- Sys.time() + db = "refseq", query, evalue = "1e-5", + out, num_alignments, num_threads = 1) { + # Argument validation + if (!file.exists(deltablast_path)) { + stop("The DELTABLAST executable path is invalid: ", deltablast_path) + } + if (!dir.exists(db_search_path)) { + stop("The database search path is invalid: ", db_search_path) + } + if (!file.exists(query)) { + stop("The query file path is invalid: ", query) + } + if (!is.numeric(as.numeric(evalue)) || as.numeric(evalue) <= 0) { + stop("The evalue must be a positive number: ", evalue) + } + if (!is.numeric(num_alignments) || num_alignments <= 0) { + stop("The number of alignments must be a + positive integer: ", num_alignments) + } + if (!is.numeric(num_threads) || num_threads <= 0) { + stop("The number of threads must be a positive integer: ", num_threads) + } + + start <- Sys.time() + + tryCatch({ system(paste0("export BLASTDB=/", db_search_path)) system2( - command = deltablast_path, - args = c( - "-db", db, - "-query", query, - "-evalue", evalue, - "-out", out, - "-num_threads", num_threads, - "-num_alignments", num_alignments - # ,"-outfmt", outfmt - ) + command = deltablast_path, + args = c( + "-db", db, + "-query", query, + "-evalue", evalue, + "-out", out, + "-num_threads", num_threads, + "-num_alignments", num_alignments + # ,"-outfmt", outfmt + ) ) print(Sys.time() - start) + }, error = function(e) { + message(paste("Error in run_deltablast: ", e)) + }, warning = function(w) { + message(paste("Warning in run_deltablast: ", w)) + }, finally = { + message("run_deltablast completed") + }) + } @@ -55,20 +86,48 @@ run_deltablast <- function(deltablast_path, db_search_path, #' #' @examples run_rpsblast <- function(rpsblast_path, db_search_path, - db = "refseq", query, evalue = "1e-5", - out, num_threads = 1) { - start <- Sys.time() + db = "refseq", query, evalue = "1e-5", + out, num_threads = 1) { + # Argument validation + if (!file.exists(rpsblast_path)) { + stop("The RPSBLAST executable path is invalid: ", rpsblast_path) + } + if (!dir.exists(db_search_path)) { + stop("The database search path is invalid: ", db_search_path) + } + if (!file.exists(query)) { + stop("The query file path is invalid: ", query) + } + if (!is.numeric(as.numeric(evalue)) || as.numeric(evalue) <= 0) { + stop("The evalue must be a positive number: ", evalue) + } + if (!is.numeric(num_threads) || num_threads <= 0) { + stop("The number of threads must be a positive integer: ", num_threads) + } + + start <- Sys.time() + + tryCatch({ + system(paste0("export BLASTDB=/", db_search_path)) + system2( - command = rpsblast_path, - args = c( - "-db", db, - "-query", query, - "-evalue", evalue, - "-out", out, - "-num_threads", num_threads - # , "-outfmt", outfmt - ) + command = rpsblast_path, + args = c( + "-db", db, + 
"-query", query, + "-evalue", evalue, + "-out", out, + "-num_threads", num_threads + ) ) print(Sys.time() - start) + }, error = function(e) { + message(paste("Error in run_rpsblast: ", e)) + }, warning = function(w) { + message(paste("Warning in run_rpsblast: ", w)) + }, finally = { + message("run_rpsblast completed") + }) + } From 2998172af60027b383f6c7da1d6e50213e5bc3f3 Mon Sep 17 00:00:00 2001 From: David Mayer Date: Wed, 9 Oct 2024 11:43:34 -0600 Subject: [PATCH 17/41] additional ipr2viz() global variable definitions - .data for NSE - reference internal MolEvolvR pkg data for`iprscan_cols` and `lookup_table_cols` - add Rd tags for `stats::as.formula()` and `ggplot2::unit()` Resolves: https://github.com/JRaviLab/MolEvolvR/pull/64/files#r1793916461 --- NAMESPACE | 2 ++ R/ipr2viz.R | 31 ++++++++++++++++--------------- 2 files changed, 18 insertions(+), 15 deletions(-) diff --git a/NAMESPACE b/NAMESPACE index 2c5597c6..1d4f7f86 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -173,6 +173,7 @@ importFrom(ggplot2,theme) importFrom(ggplot2,theme_classic) importFrom(ggplot2,theme_grey) importFrom(ggplot2,theme_minimal) +importFrom(ggplot2,unit) importFrom(ggplot2,xlab) importFrom(ggplot2,ylab) importFrom(grDevices,adjustcolor) @@ -237,6 +238,7 @@ importFrom(sendmailR,sendmail) importFrom(seqinr,dist.alignment) importFrom(seqinr,read.alignment) importFrom(shiny,showNotification) +importFrom(stats,as.formula) importFrom(stats,complete.cases) importFrom(stats,logLik) importFrom(stats,na.omit) diff --git a/R/ipr2viz.R b/R/ipr2viz.R index b0db06f9..0d417be0 100644 --- a/R/ipr2viz.R +++ b/R/ipr2viz.R @@ -106,9 +106,10 @@ find_top_acc <- function(infile_full, #' #' @importFrom dplyr distinct filter select #' @importFrom gggenes geom_gene_arrow geom_subgene_arrow -#' @importFrom ggplot2 aes aes_string as_labeller element_text facet_wrap ggplot guides margin scale_fill_manual theme theme_minimal ylab +#' @importFrom ggplot2 aes aes_string as_labeller element_text facet_wrap ggplot guides margin scale_fill_manual theme theme_minimal unit ylab #' @importFrom readr read_tsv #' @importFrom tidyr pivot_wider +#' @importFrom stats as.formula #' #' @return #' @export @@ -135,10 +136,10 @@ ipr2viz <- function(infile_ipr = NULL, infile_full = NULL, accessions = c(), ADDITIONAL_COLORS <- sample(CPCOLS, 1000, replace = TRUE) CPCOLS <- append(x = CPCOLS, values = ADDITIONAL_COLORS) ## Read IPR file - ipr_out <- read_tsv(infile_ipr, col_names = T, col_types = iprscan_cols) - ipr_out <- ipr_out %>% filter(Name %in% accessions) + ipr_out <- read_tsv(infile_ipr, col_names = T, col_types = MolEvolvR::iprscan_cols) + ipr_out <- ipr_out %>% filter(.data$Name %in% accessions) analysis_cols <- paste0("DomArch.", analysis) - infile_full <- infile_full %>% select(analysis_cols, Lineage_short, QueryName, PcPositive, AccNum) + infile_full <- infile_full %>% select(.data$analysis_cols, .data$Lineage_short, .data$QueryName, .data$PcPositive, .data$AccNum) ## To filter by Analysis analysis <- paste(analysis, collapse = "|") ## @SAM: This can't be set in stone since the analysis may change! @@ -158,22 +159,22 @@ ipr2viz <- function(infile_ipr = NULL, infile_full = NULL, accessions = c(), ## Need to fix this eventually based on the 'real' gene orientation! 
:) ipr_out$Strand <- rep("forward", nrow(ipr_out)) - ipr_out <- ipr_out %>% arrange(AccNum, StartLoc, StopLoc) + ipr_out <- ipr_out %>% arrange(.data$AccNum, .data$StartLoc, .data$StopLoc) ipr_out_sub <- filter( ipr_out, - grepl(pattern = analysis, x = Analysis) + grepl(pattern = analysis, x = .data$Analysis) ) # dynamic analysis labeller analyses <- ipr_out_sub %>% - select(Analysis) %>% + select(.data$Analysis) %>% distinct() analysis_labeler <- analyses %>% - pivot_wider(names_from = Analysis, values_from = Analysis) + pivot_wider(names_from = .data$Analysis, values_from = .data$Analysis) lookup_tbl_path <- "/data/research/jravilab/common_data/cln_lookup_tbl.tsv" - lookup_tbl <- read_tsv(lookup_tbl_path, col_names = T, col_types = lookup_table_cols) + lookup_tbl <- read_tsv(lookup_tbl_path, col_names = T, col_types = MolEvolvR::lookup_table_cols) - lookup_tbl <- lookup_tbl %>% select(-ShortName) # Already has ShortName -- Just needs SignDesc + lookup_tbl <- lookup_tbl %>% select(-.data$ShortName) # Already has ShortName -- Just needs SignDesc # ipr_out_sub = ipr_out_sub %>% select(-ShortName) # TODO: Fix lookup table and uncomment below # ipr_out_sub <- merge(ipr_out_sub, lookup_tbl, by.x = "DB.ID", by.y = "DB.ID") @@ -196,7 +197,7 @@ ipr2viz <- function(infile_ipr = NULL, infile_full = NULL, accessions = c(), ), color = "white") + geom_gene_arrow(fill = NA, color = "grey") + # geom_blank(data = dummies) + - facet_wrap(~Analysis, + facet_wrap(~.data$Analysis, strip.position = "top", ncol = 5, labeller = as_labeller(analysis_labeler) ) + @@ -217,9 +218,9 @@ ipr2viz <- function(infile_ipr = NULL, infile_full = NULL, accessions = c(), plot <- ggplot( ipr_out_sub, aes( - xmin = 1, xmax = SLength, - y = Analysis, # y = AccNum - label = ShortName + xmin = 1, xmax = .data$SLength, + y = .data$Analysis, # y = AccNum + label = .data$ShortName ) ) + geom_subgene_arrow(data = ipr_out_sub, aes_string( @@ -296,7 +297,7 @@ ipr2viz_web <- function(infile_ipr, ## @SAM, colnames, merges, everything neeeds to be done now based on the ## combined lookup table from "common_data" lookup_tbl_path <- "/data/research/jravilab/common_data/cln_lookup_tbl.tsv" - lookup_tbl <- read_tsv(lookup_tbl_path, col_names = T, col_types = .data$lookup_table_cols) + lookup_tbl <- read_tsv(lookup_tbl_path, col_names = T, col_types = MolEvolvR::lookup_table_cols) ## Read IPR file and subset by Accessions ipr_out <- read_tsv(infile_ipr, col_names = T) From c78aed2bb0a934a130ab8663cbee356c0eaf77f3 Mon Sep 17 00:00:00 2001 From: David Mayer Date: Wed, 9 Oct 2024 11:54:47 -0600 Subject: [PATCH 18/41] adding additional imports. 
resolves: - https://github.com/JRaviLab/MolEvolvR/pull/64/files#r1793926923 - https://github.com/JRaviLab/MolEvolvR/pull/64/files#r1793927303 --- NAMESPACE | 2 ++ R/plotting.R | 5 +++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/NAMESPACE b/NAMESPACE index 1d4f7f86..f75fb862 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -109,6 +109,7 @@ importFrom(assertthat,assert_that) importFrom(assertthat,has_name) importFrom(base64enc,base64encode) importFrom(biomartr,getKingdomAssemblySummary) +importFrom(d3r,d3_nest) importFrom(data.table,as.data.table) importFrom(data.table,fread) importFrom(data.table,fwrite) @@ -259,6 +260,7 @@ importFrom(stringr,str_sub) importFrom(stringr,str_trim) importFrom(stringr,word) importFrom(sunburstR,sunburst) +importFrom(sunburstR,sund2b) importFrom(tibble,as_tibble) importFrom(tibble,tibble) importFrom(tidyr,drop_na) diff --git a/R/plotting.R b/R/plotting.R index ef803f10..ba0708d3 100644 --- a/R/plotting.R +++ b/R/plotting.R @@ -1183,10 +1183,11 @@ wordcloud2_element <- function(query_data = "prot", #' then the legend will be in the descending order of the top level hierarchy. #' will be rendered. If the type is sund2b, a sund2b plot will be rendered. #' +#' @importFrom d3r d3_nest #' @importFrom dplyr arrange desc group_by_at select summarise #' @importFrom htmlwidgets onRender #' @importFrom rlang sym -#' @importFrom sunburstR sunburst +#' @importFrom sunburstR sunburst sund2b #' @importFrom tidyr drop_na separate #' #' @return @@ -1223,7 +1224,7 @@ lineage_sunburst <- function(prot, lineage_column = "Lineage", group_by_at(levels_vec) %>% summarise(size = n()) protLevels <- protLevels %>% arrange() - tree <- .data$d3_nest(protLevels, value_cols = "size") + tree <- d3_nest(protLevels, value_cols = "size") # Plot sunburst if (type == "sunburst") { From 527c470104805b093f7da3e9f45335f53945cb1a Mon Sep 17 00:00:00 2001 From: teddyCodex Date: Wed, 9 Oct 2024 22:55:15 +0100 Subject: [PATCH 19/41] update CONTRIBUTING.md --- .github/CONTRIBUTING.md | 123 ++++++++++++++++++++++------------------ 1 file changed, 69 insertions(+), 54 deletions(-) diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index 5db3f961..9fcd6b7f 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -5,72 +5,87 @@ For a detailed discussion on contributing to this and other tidyverse packages, ## Fixing typos -You can fix typos, spelling mistakes, or grammatical errors in the documentation directly using the GitHub web interface, as long as the changes are made in the _source_ file. -This generally means you'll need to edit [roxygen2 comments](https://roxygen2.r-lib.org/articles/roxygen2.html) in an `.R`, not a `.Rd` file. +You can fix typos, spelling mistakes, or grammatical errors in the documentation directly using the GitHub web interface, as long as the changes are made in the _source_ file. +This generally means you'll need to edit [roxygen2 comments](https://roxygen2.r-lib.org/articles/roxygen2.html) in an `.R`, not a `.Rd` file. You can find the `.R` file that generates the `.Rd` by reading the comment in the first line. ## Bigger changes -If you want to make a bigger change, it's a good idea to first file an issue and make sure someone from the team agrees that it’s needed. -If you’ve found a bug, please file an issue that illustrates the bug with a minimal +If you want to make a bigger change, it's a good idea to first file an issue and make sure someone from the team agrees that it’s needed. 
+If you’ve found a bug, please file an issue that illustrates the bug with a minimal [reprex](https://www.tidyverse.org/help/#reprex) (this will also help you write a unit test, if needed). See our guide on [how to create a great issue](https://code-review.tidyverse.org/issues/) for more advice. ### Pull request process -* Fork the package and clone onto your computer. If you haven't done this before, we recommend using `usethis`. - -* Install and load the `usethis` package with: - ``` - install.packages("usethis") - - library("usethis") - ``` -* Clone and fork the MolEvolvR package using: - ``` - usethis::create_from_github("JRaviLab/MolEvolvR", fork = TRUE) - ``` -* Install all development dependencies and then make sure the package passes R CMD check using `devtools`: - ``` - install.packages("devtools") - - library("devtools") - - devtools::install_dev_deps() - - devtools::check() - ``` - _If R CMD check doesn't pass cleanly, it's a good idea to ask for help before continuing._ - -* Create a Git branch for your pull request (PR). We recommend using - ``` - usethis::pr_init("brief-description-of-change") - ``` - -* Make your changes, commit to git, and then create a PR by running `usethis::pr_push()`, and following the prompts in your browser. - The title of your PR should briefly describe the change. - The body of your PR should contain `Fixes #issue-number`. - -* For user-facing changes, add a bullet to the top of `NEWS.md` (i.e. just below the first header). Follow the style described in . +- Fork the package and clone onto your computer. If you haven't done this before, we recommend using `usethis`. + +- Install and load the `usethis` package with: + + ``` + install.packages("usethis") + + library("usethis") + ``` + +- Clone and fork the MolEvolvR package using: + ``` + usethis::create_from_github("JRaviLab/MolEvolvR", fork = TRUE) + ``` +- Install Bioconductor dependencies: + + ``` + if (!require("BiocManager", quietly = TRUE)) + install.packages("BiocManager") + BiocManager::install(version = "3.19") + ``` + +- Install other development dependencies and then ensure that the package passes R CMD check using `devtools`: + + ``` + install.packages("devtools") + + library("devtools") + + devtools::install_dev_deps() + + devtools::check() + ``` + + _If R CMD check doesn't pass cleanly, it's a good idea to ask for help before continuing._ + +- Create a Git branch for your pull request (PR). We recommend using: + + ``` + usethis::pr_init("brief-description-of-change") + ``` + +- Make your changes, commit to git, and then create a PR by running `usethis::pr_push()`, and following the prompts in your browser. + The title of your PR should briefly describe the change. + The body of your PR should contain `Fixes #issue-number`. + + + + ### Code style -* New code should follow the tidyverse [style guide](https://style.tidyverse.org). - You can use the [styler](https://CRAN.R-project.org/package=styler) package to apply these styles, but please don't restyle code that has nothing to do with your PR. - -* Lint Your Code: Ensure your code adheres to our style guidelines by using [lintr](https://lintr.r-lib.org/): - ``` - install.packages("lintr") - - library("lintr") - - lintr::lint("path/to/your/file.R") - ``` - -* We use [roxygen2](https://cran.r-project.org/package=roxygen2), with [Markdown syntax](https://cran.r-project.org/web/packages/roxygen2/vignettes/rd-formatting.html), for documentation. - -* We use [testthat](https://cran.r-project.org/package=testthat) for unit tests. 
- Contributions with test cases included are easier to accept. +- New code should follow the tidyverse [style guide](https://style.tidyverse.org). + You can use the [styler](https://CRAN.R-project.org/package=styler) package to apply these styles, but please don't restyle code that has nothing to do with your PR. +- Lint Your Code: Ensure your code adheres to our style guidelines by using [lintr](https://lintr.r-lib.org/): + + ``` + install.packages("lintr") + + library("lintr") + + lintr::lint("path/to/your/file.R") + ``` + +- We use [roxygen2](https://cran.r-project.org/package=roxygen2), with [Markdown syntax](https://cran.r-project.org/web/packages/roxygen2/vignettes/rd-formatting.html), for documentation. + +- We use [testthat](https://cran.r-project.org/package=testthat) for unit tests. + Contributions with test cases included are easier to accept. ## Code of Conduct From 4ff68fb06395842093879dea47e45aaae1967225 Mon Sep 17 00:00:00 2001 From: Seyi Kuforiji Date: Thu, 10 Oct 2024 08:27:02 +0100 Subject: [PATCH 20/41] Reverting to old function names for the following functions to create a separate pr for their updates and on a different branch: R/combine_analysis.R combine_full combine_ipr R/combine_files.R combine_files R/create_lineage_lookup.R create_lineage_lookup shorten_NA --- R/combine_analysis.R | 4 ++-- R/combine_files.R | 10 +++++----- R/create_lineage_lookup.R | 8 ++++---- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/R/combine_analysis.R b/R/combine_analysis.R index 58ce1f14..bb3b3ce2 100755 --- a/R/combine_analysis.R +++ b/R/combine_analysis.R @@ -17,7 +17,7 @@ #' @export #' #' @examples -combineFullAnalysis <- function(inpath, ret = FALSE) { +combine_full <- function(inpath, ret = FALSE) { ## Combining full_analysis files full_combnd <- combine_files(inpath, pattern = "*.full_analysis.tsv", skip = 0, @@ -44,7 +44,7 @@ combineFullAnalysis <- function(inpath, ret = FALSE) { #' @export #' #' @examples -combineIPR <- function(inpath, ret = FALSE) { +combine_ipr <- function(inpath, ret = FALSE) { ## Combining clean ipr files ipr_combnd <- combine_files(inpath, pattern = "*.iprscan_cln.tsv", skip = 0, diff --git a/R/combine_files.R b/R/combine_files.R index 455ddd53..76c5fa09 100755 --- a/R/combine_files.R +++ b/R/combine_files.R @@ -38,7 +38,7 @@ #' @export #' #' @examples -combineFiles <- function(inpath = c("../molevol_data/project_data/phage_defense/"), +combine_files <- function(inpath = c("../molevol_data/project_data/phage_defense/"), pattern = "*full_analysis.tsv", delim = "\t", skip = 0, col_names = T) { @@ -67,7 +67,7 @@ combineFiles <- function(inpath = c("../molevol_data/project_data/phage_defense/ ## Sample Runs ## ################# # ## Combining full_analysis files -# full_combnd <- combineFiles(inpath, +# full_combnd <- combine_files(inpath, # pattern="*full_analysis.txt", skip=0, # col_names=T) # @@ -75,7 +75,7 @@ combineFiles <- function(inpath = c("../molevol_data/project_data/phage_defense/ # path="../molevol_data/project_data/slps/full_combined.tsv") # # ## Combining clean files -# cln_combnd <- combineFiles(inpath, +# cln_combnd <- combine_files(inpath, # pattern="^.*cln.txt", skip=0, # col_names=T) # @@ -86,14 +86,14 @@ combineFiles <- function(inpath = c("../molevol_data/project_data/phage_defense/ # ## Less helpful examples! 
# ## Combining BLAST files # ## Likely makes no sense since clustering is done per query -# cl_blast_combnd <- combineFiles(inpath, +# cl_blast_combnd <- combine_files(inpath, # pattern="^.*refseq.1e-5.txt", skip=0, # col_names=cl_blast_colnames) %>% # select(-PcPositive, -ClusterID) # # ## Combining IPR files # ## Likely makes no sense since there may be repeated AccNum from indiv. files! -# ipr_combnd <- combineFiles(inpath, +# ipr_combnd <- combine_files(inpath, # pattern="*iprscan.lins*", skip=0, # col_names=ipr_colnames) # diff --git a/R/create_lineage_lookup.R b/R/create_lineage_lookup.R index d911934a..8e365cbb 100644 --- a/R/create_lineage_lookup.R +++ b/R/create_lineage_lookup.R @@ -26,9 +26,9 @@ #' @export #' #' @examples -createLineageLookup <- function(lineage_file = here("data/rankedlineage.dmp"), +create_lineage_lookup <- function(lineage_file = here("data/rankedlineage.dmp"), outfile, taxonomic_rank = "phylum") { - .shortenNA <- function(Lineage) { + shorten_NA <- function(Lineage) { first_NA <- str_locate(Lineage, "NA")[1] if (is.na(first_NA)) { # No NAs @@ -92,7 +92,7 @@ createLineageLookup <- function(lineage_file = here("data/rankedlineage.dmp"), # Takes a while (2million rows after all) rankedLinsCombined <- rankedLins %>% unite(col = "Lineage", all_of(combined_taxonomy), sep = ">") %>% - mutate(Lineage = unlist(map(Lineage, .shortenNA))) + mutate(Lineage = unlist(map(Lineage, shorten_NA))) @@ -101,7 +101,7 @@ createLineageLookup <- function(lineage_file = here("data/rankedlineage.dmp"), -#' CreateLineageLookup <- function(assembly_path, updateAssembly = FALSE, file_type = "tsv") +#' create_lineage_lookup <- function(assembly_path, updateAssembly = FALSE, file_type = "tsv") #' { #' #' Create a look up table that goes from GCA_ID, to TaxID, to Lineage #' #' @author Samuel Chen From 035c5e13b4cfe54b4ba7ff1d5c7618ade13720d1 Mon Sep 17 00:00:00 2001 From: Seyi Kuforiji Date: Thu, 10 Oct 2024 08:41:47 +0100 Subject: [PATCH 21/41] minor updates to namespace and Rd files after running devtool::check() --- NAMESPACE | 8 ++++---- man/{combineFiles.Rd => combine_files.Rd} | 6 +++--- man/{combineFullAnalysis.Rd => combine_full.Rd} | 6 +++--- man/{combineIPR.Rd => combine_ipr.Rd} | 6 +++--- man/{createLineageLookup.Rd => create_lineage_lookup.Rd} | 6 +++--- 5 files changed, 16 insertions(+), 16 deletions(-) rename man/{combineFiles.Rd => combine_files.Rd} (92%) rename man/{combineFullAnalysis.Rd => combine_full.Rd} (69%) rename man/{combineIPR.Rd => combine_ipr.Rd} (74%) rename man/{createLineageLookup.Rd => create_lineage_lookup.Rd} (91%) diff --git a/NAMESPACE b/NAMESPACE index cd135cc8..f49975b4 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -26,9 +26,9 @@ export(cleanGeneDescription) export(cleanGenomicContext) export(cleanLineage) export(cleanSpecies) -export(combineFiles) -export(combineFullAnalysis) -export(combineIPR) +export(combine_files) +export(combine_full) +export(combine_ipr) export(condenseRepeatedDomains) export(convert2TitleCase) export(convertAlignment2FA) @@ -37,8 +37,8 @@ export(convert_fa2tre) export(count_bycol) export(count_to_sunburst) export(count_to_treemap) -export(createLineageLookup) export(create_all_col_params) +export(create_lineage_lookup) export(create_one_col_params) export(domain_network) export(efetch_ipg) diff --git a/man/combineFiles.Rd b/man/combine_files.Rd similarity index 92% rename from man/combineFiles.Rd rename to man/combine_files.Rd index 3b56b923..4126eb9e 100644 --- a/man/combineFiles.Rd +++ b/man/combine_files.Rd @@ -1,10 +1,10 @@ % 
Generated by roxygen2: do not edit by hand % Please edit documentation in R/combine_files.R -\name{combineFiles} -\alias{combineFiles} +\name{combine_files} +\alias{combine_files} \title{Download the combined assembly summaries of genbank and refseq} \usage{ -combineFiles( +combine_files( inpath = c("../molevol_data/project_data/phage_defense/"), pattern = "*full_analysis.tsv", delim = "\\t", diff --git a/man/combineFullAnalysis.Rd b/man/combine_full.Rd similarity index 69% rename from man/combineFullAnalysis.Rd rename to man/combine_full.Rd index 35925e86..f4e6597b 100644 --- a/man/combineFullAnalysis.Rd +++ b/man/combine_full.Rd @@ -1,10 +1,10 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/combine_analysis.R -\name{combineFullAnalysis} -\alias{combineFullAnalysis} +\name{combine_full} +\alias{combine_full} \title{Combining full_analysis files} \usage{ -combineFullAnalysis(inpath, ret = FALSE) +combine_full(inpath, ret = FALSE) } \arguments{ \item{ret}{} diff --git a/man/combineIPR.Rd b/man/combine_ipr.Rd similarity index 74% rename from man/combineIPR.Rd rename to man/combine_ipr.Rd index 035c4274..52aa3057 100644 --- a/man/combineIPR.Rd +++ b/man/combine_ipr.Rd @@ -1,10 +1,10 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/combine_analysis.R -\name{combineIPR} -\alias{combineIPR} +\name{combine_ipr} +\alias{combine_ipr} \title{Combining clean ipr files} \usage{ -combineIPR(inpath, ret = FALSE) +combine_ipr(inpath, ret = FALSE) } \arguments{ \item{ret}{} diff --git a/man/createLineageLookup.Rd b/man/create_lineage_lookup.Rd similarity index 91% rename from man/createLineageLookup.Rd rename to man/create_lineage_lookup.Rd index 5dbab978..51670f35 100644 --- a/man/createLineageLookup.Rd +++ b/man/create_lineage_lookup.Rd @@ -1,10 +1,10 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/create_lineage_lookup.R -\name{createLineageLookup} -\alias{createLineageLookup} +\name{create_lineage_lookup} +\alias{create_lineage_lookup} \title{Create a look up table that goes from TaxID, to Lineage} \usage{ -createLineageLookup( +create_lineage_lookup( lineage_file = here("data/rankedlineage.dmp"), outfile, taxonomic_rank = "phylum" From fb5ac23f8a3e8e5709498aa24308a950802d1c29 Mon Sep 17 00:00:00 2001 From: Seyi Kuforiji Date: Thu, 10 Oct 2024 09:20:22 +0100 Subject: [PATCH 22/41] Renamed the following function; R/combine_analysis.R combine_full combine_ipr R/combine_files.R combine_files R/create_lineage_lookup.R create_lineage_lookup shorten_NA with approved names from #44 --- NAMESPACE | 8 ++++---- R/acc2lin.R | 2 +- R/combine_analysis.R | 8 ++++---- R/combine_files.R | 10 +++++----- R/create_lineage_lookup.R | 8 ++++---- R/lineage.R | 4 ++-- man/GCA2lin.Rd | 2 +- man/{combine_files.Rd => combineFiles.Rd} | 6 +++--- man/{combine_full.Rd => combineFullAnalysis.Rd} | 6 +++--- man/{combine_ipr.Rd => combineIPR.Rd} | 6 +++--- ...create_lineage_lookup.Rd => createLineageLookup.Rd} | 6 +++--- man/ipg2lin.Rd | 2 +- 12 files changed, 34 insertions(+), 34 deletions(-) rename man/{combine_files.Rd => combineFiles.Rd} (92%) rename man/{combine_full.Rd => combineFullAnalysis.Rd} (69%) rename man/{combine_ipr.Rd => combineIPR.Rd} (74%) rename man/{create_lineage_lookup.Rd => createLineageLookup.Rd} (91%) diff --git a/NAMESPACE b/NAMESPACE index f49975b4..cd135cc8 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -26,9 +26,9 @@ export(cleanGeneDescription) export(cleanGenomicContext) export(cleanLineage) 
export(cleanSpecies) -export(combine_files) -export(combine_full) -export(combine_ipr) +export(combineFiles) +export(combineFullAnalysis) +export(combineIPR) export(condenseRepeatedDomains) export(convert2TitleCase) export(convertAlignment2FA) @@ -37,8 +37,8 @@ export(convert_fa2tre) export(count_bycol) export(count_to_sunburst) export(count_to_treemap) +export(createLineageLookup) export(create_all_col_params) -export(create_lineage_lookup) export(create_one_col_params) export(domain_network) export(efetch_ipg) diff --git a/R/acc2lin.R b/R/acc2lin.R index dfb33da9..a6551247 100644 --- a/R/acc2lin.R +++ b/R/acc2lin.R @@ -277,7 +277,7 @@ efetch_ipg <- function(accnums, out_path, plan = "sequential") { #' This file can be generated using the "DownloadAssemblySummary()" function #' @param lineagelookup_path String of the path to the lineage lookup file #' (taxid to lineage mapping). This file can be generated using the -#' "create_lineage_lookup()" function +#' "createLineageLookup()" function #' #' @importFrom data.table fread #' diff --git a/R/combine_analysis.R b/R/combine_analysis.R index bb3b3ce2..55e36925 100755 --- a/R/combine_analysis.R +++ b/R/combine_analysis.R @@ -17,9 +17,9 @@ #' @export #' #' @examples -combine_full <- function(inpath, ret = FALSE) { +combineFullAnalysis <- function(inpath, ret = FALSE) { ## Combining full_analysis files - full_combnd <- combine_files(inpath, + full_combnd <- combineFiles(inpath, pattern = "*.full_analysis.tsv", skip = 0, col_names = T ) @@ -44,9 +44,9 @@ combine_full <- function(inpath, ret = FALSE) { #' @export #' #' @examples -combine_ipr <- function(inpath, ret = FALSE) { +combineIPR <- function(inpath, ret = FALSE) { ## Combining clean ipr files - ipr_combnd <- combine_files(inpath, + ipr_combnd <- combineFiles(inpath, pattern = "*.iprscan_cln.tsv", skip = 0, col_names = T ) diff --git a/R/combine_files.R b/R/combine_files.R index 76c5fa09..455ddd53 100755 --- a/R/combine_files.R +++ b/R/combine_files.R @@ -38,7 +38,7 @@ #' @export #' #' @examples -combine_files <- function(inpath = c("../molevol_data/project_data/phage_defense/"), +combineFiles <- function(inpath = c("../molevol_data/project_data/phage_defense/"), pattern = "*full_analysis.tsv", delim = "\t", skip = 0, col_names = T) { @@ -67,7 +67,7 @@ combine_files <- function(inpath = c("../molevol_data/project_data/phage_defense ## Sample Runs ## ################# # ## Combining full_analysis files -# full_combnd <- combine_files(inpath, +# full_combnd <- combineFiles(inpath, # pattern="*full_analysis.txt", skip=0, # col_names=T) # @@ -75,7 +75,7 @@ combine_files <- function(inpath = c("../molevol_data/project_data/phage_defense # path="../molevol_data/project_data/slps/full_combined.tsv") # # ## Combining clean files -# cln_combnd <- combine_files(inpath, +# cln_combnd <- combineFiles(inpath, # pattern="^.*cln.txt", skip=0, # col_names=T) # @@ -86,14 +86,14 @@ combine_files <- function(inpath = c("../molevol_data/project_data/phage_defense # ## Less helpful examples! # ## Combining BLAST files # ## Likely makes no sense since clustering is done per query -# cl_blast_combnd <- combine_files(inpath, +# cl_blast_combnd <- combineFiles(inpath, # pattern="^.*refseq.1e-5.txt", skip=0, # col_names=cl_blast_colnames) %>% # select(-PcPositive, -ClusterID) # # ## Combining IPR files # ## Likely makes no sense since there may be repeated AccNum from indiv. files! 
-# ipr_combnd <- combine_files(inpath, +# ipr_combnd <- combineFiles(inpath, # pattern="*iprscan.lins*", skip=0, # col_names=ipr_colnames) # diff --git a/R/create_lineage_lookup.R b/R/create_lineage_lookup.R index 8e365cbb..78e79048 100644 --- a/R/create_lineage_lookup.R +++ b/R/create_lineage_lookup.R @@ -26,9 +26,9 @@ #' @export #' #' @examples -create_lineage_lookup <- function(lineage_file = here("data/rankedlineage.dmp"), +createLineageLookup <- function(lineage_file = here("data/rankedlineage.dmp"), outfile, taxonomic_rank = "phylum") { - shorten_NA <- function(Lineage) { + .shortenNA <- function(Lineage) { first_NA <- str_locate(Lineage, "NA")[1] if (is.na(first_NA)) { # No NAs @@ -92,7 +92,7 @@ create_lineage_lookup <- function(lineage_file = here("data/rankedlineage.dmp"), # Takes a while (2million rows after all) rankedLinsCombined <- rankedLins %>% unite(col = "Lineage", all_of(combined_taxonomy), sep = ">") %>% - mutate(Lineage = unlist(map(Lineage, shorten_NA))) + mutate(Lineage = unlist(map(Lineage, .shortenNA))) @@ -101,7 +101,7 @@ create_lineage_lookup <- function(lineage_file = here("data/rankedlineage.dmp"), -#' create_lineage_lookup <- function(assembly_path, updateAssembly = FALSE, file_type = "tsv") +#' createLineageLookup <- function(assembly_path, updateAssembly = FALSE, file_type = "tsv") #' { #' #' Create a look up table that goes from GCA_ID, to TaxID, to Lineage #' #' @author Samuel Chen diff --git a/R/lineage.R b/R/lineage.R index 20acec04..7ceed847 100644 --- a/R/lineage.R +++ b/R/lineage.R @@ -77,7 +77,7 @@ DownloadAssemblySummary <- function(outpath, #' This file can be generated using the "DownloadAssemblySummary()" function #' @param lineagelookup_path String of the path to the lineage lookup file #' (taxid to lineage mapping). This file can be generated using the -#' "create_lineage_lookup()" function +#' "createLineageLookup()" function #' @param acc_col #' #' @importFrom dplyr pull @@ -309,7 +309,7 @@ efetch_ipg <- function(accessions, out_path, plan = "multicore") { #' @param genbank_assembly_path #' @param lineagelookup_path String of the path to the lineage lookup file #' (taxid to lineage mapping). This file can be generated using the -#' "create_lineage_lookup()" function +#' "createLineageLookup()" function #' #' @importFrom data.table fread setnames #' diff --git a/man/GCA2lin.Rd b/man/GCA2lin.Rd index ad83ca39..47acc3d7 100644 --- a/man/GCA2lin.Rd +++ b/man/GCA2lin.Rd @@ -19,7 +19,7 @@ This file can be generated using the "DownloadAssemblySummary()" function} \item{lineagelookup_path}{String of the path to the lineage lookup file (taxid to lineage mapping). 
This file can be generated using the -"create_lineage_lookup()" function} +"createLineageLookup()" function} \item{acc_col}{} } diff --git a/man/combine_files.Rd b/man/combineFiles.Rd similarity index 92% rename from man/combine_files.Rd rename to man/combineFiles.Rd index 4126eb9e..3b56b923 100644 --- a/man/combine_files.Rd +++ b/man/combineFiles.Rd @@ -1,10 +1,10 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/combine_files.R -\name{combine_files} -\alias{combine_files} +\name{combineFiles} +\alias{combineFiles} \title{Download the combined assembly summaries of genbank and refseq} \usage{ -combine_files( +combineFiles( inpath = c("../molevol_data/project_data/phage_defense/"), pattern = "*full_analysis.tsv", delim = "\\t", diff --git a/man/combine_full.Rd b/man/combineFullAnalysis.Rd similarity index 69% rename from man/combine_full.Rd rename to man/combineFullAnalysis.Rd index f4e6597b..35925e86 100644 --- a/man/combine_full.Rd +++ b/man/combineFullAnalysis.Rd @@ -1,10 +1,10 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/combine_analysis.R -\name{combine_full} -\alias{combine_full} +\name{combineFullAnalysis} +\alias{combineFullAnalysis} \title{Combining full_analysis files} \usage{ -combine_full(inpath, ret = FALSE) +combineFullAnalysis(inpath, ret = FALSE) } \arguments{ \item{ret}{} diff --git a/man/combine_ipr.Rd b/man/combineIPR.Rd similarity index 74% rename from man/combine_ipr.Rd rename to man/combineIPR.Rd index 52aa3057..035c4274 100644 --- a/man/combine_ipr.Rd +++ b/man/combineIPR.Rd @@ -1,10 +1,10 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/combine_analysis.R -\name{combine_ipr} -\alias{combine_ipr} +\name{combineIPR} +\alias{combineIPR} \title{Combining clean ipr files} \usage{ -combine_ipr(inpath, ret = FALSE) +combineIPR(inpath, ret = FALSE) } \arguments{ \item{ret}{} diff --git a/man/create_lineage_lookup.Rd b/man/createLineageLookup.Rd similarity index 91% rename from man/create_lineage_lookup.Rd rename to man/createLineageLookup.Rd index 51670f35..5dbab978 100644 --- a/man/create_lineage_lookup.Rd +++ b/man/createLineageLookup.Rd @@ -1,10 +1,10 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/create_lineage_lookup.R -\name{create_lineage_lookup} -\alias{create_lineage_lookup} +\name{createLineageLookup} +\alias{createLineageLookup} \title{Create a look up table that goes from TaxID, to Lineage} \usage{ -create_lineage_lookup( +createLineageLookup( lineage_file = here("data/rankedlineage.dmp"), outfile, taxonomic_rank = "phylum" diff --git a/man/ipg2lin.Rd b/man/ipg2lin.Rd index 453668b0..5850e86c 100644 --- a/man/ipg2lin.Rd +++ b/man/ipg2lin.Rd @@ -29,7 +29,7 @@ file} \item{lineagelookup_path}{String of the path to the lineage lookup file (taxid to lineage mapping). 
This file can be generated using the -"create_lineage_lookup()" function} +"createLineageLookup()" function} \item{assembly_path}{String of the path to the assembly_summary path This file can be generated using the "DownloadAssemblySummary()" function} From 106eb14b4e2eace66737a07cf5840011e490d116 Mon Sep 17 00:00:00 2001 From: Seyi Kuforiji Date: Thu, 10 Oct 2024 10:24:49 +0100 Subject: [PATCH 23/41] reverting to old function names; make_opts2procs, map_advanced_opts2procs, get_proc_medians, write_proc_medians_table, write_proc_medians_yml, get_proc_weights, advanced_opts2est_walltime in R/assign_job_queue.R to be updated in a separate full request --- NAMESPACE | 18 ++-- R/assign_job_queue.R | 84 +++++++++---------- ...tions.Rd => advanced_opts2est_walltime.Rd} | 10 +-- ...{assignJobQueue.Rd => assign_job_queue.Rd} | 12 +-- ...eProcessRuntime.Rd => get_proc_medians.Rd} | 10 +-- ...sRuntimeWeights.Rd => get_proc_weights.Rd} | 8 +- ...apOption2Process.Rd => make_opts2procs.Rd} | 8 +- ...2Process.Rd => map_advanced_opts2procs.Rd} | 8 +- ...llTimes.Rd => plot_estimated_walltimes.Rd} | 8 +- ...ime2TSV.Rd => write_proc_medians_table.Rd} | 8 +- ...timeToYML.Rd => write_proc_medians_yml.Rd} | 10 +-- 11 files changed, 92 insertions(+), 92 deletions(-) rename man/{calculateEstimatedWallTimeFromOptions.Rd => advanced_opts2est_walltime.Rd} (73%) rename man/{assignJobQueue.Rd => assign_job_queue.Rd} (68%) rename man/{calculateProcessRuntime.Rd => get_proc_medians.Rd} (76%) rename man/{getProcessRuntimeWeights.Rd => get_proc_weights.Rd} (73%) rename man/{mapOption2Process.Rd => make_opts2procs.Rd} (75%) rename man/{mapAdvOption2Process.Rd => map_advanced_opts2procs.Rd} (76%) rename man/{plotEstimatedWallTimes.Rd => plot_estimated_walltimes.Rd} (77%) rename man/{writeProcessRuntime2TSV.Rd => write_proc_medians_table.Rd} (77%) rename man/{writeProcessRuntimeToYML.Rd => write_proc_medians_yml.Rd} (74%) diff --git a/NAMESPACE b/NAMESPACE index f49975b4..b4be51ec 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -15,11 +15,10 @@ export(add_leaves) export(add_lins) export(add_name) export(add_tax) +export(advanced_opts2est_walltime) export(alignFasta) export(assert_count_df) -export(assignJobQueue) -export(calculateEstimatedWallTimeFromOptions) -export(calculateProcessRuntime) +export(assign_job_queue) export(cleanClusters) export(cleanDomainArchitecture) export(cleanGeneDescription) @@ -54,9 +53,10 @@ export(generate_all_aln2fa) export(generate_fa2tre) export(generate_msa) export(generate_trees) -export(getProcessRuntimeWeights) export(get_accnums_from_fasta_file) export(get_job_message) +export(get_proc_medians) +export(get_proc_weights) export(ipg2lin) export(ipr2viz) export(ipr2viz_web) @@ -66,12 +66,12 @@ export(lineage.domain_repeats.plot) export(lineage.neighbors.plot) export(lineage_sunburst) export(make_job_results_url) +export(make_opts2procs) export(mapAcc2Name) -export(mapAdvOption2Process) -export(mapOption2Process) export(map_acc2name) +export(map_advanced_opts2procs) export(msa_pdf) -export(plotEstimatedWallTimes) +export(plot_estimated_walltimes) export(prot2tax) export(prot2tax_old) export(removeAsterisks) @@ -103,8 +103,8 @@ export(wordcloud2_element) export(wordcloud3) export(wordcloud_element) export(write.MsaAAMultipleAlignment) -export(writeProcessRuntime2TSV) -export(writeProcessRuntimeToYML) +export(write_proc_medians_table) +export(write_proc_medians_yml) importFrom(Biostrings,AAStringSet) importFrom(Biostrings,readAAStringSet) importFrom(Biostrings,toString) diff --git 
a/R/assign_job_queue.R b/R/assign_job_queue.R index f1fcb6db..c531fb09 100644 --- a/R/assign_job_queue.R +++ b/R/assign_job_queue.R @@ -3,16 +3,16 @@ # pipeline. # to use this, construct paths like so: file.path(common_root, "path", "to", "file.R") # for example, the reference for this file would be: -# file.path(common_root, "molevol_scripts", "R", "assignJobQueue.R") +# file.path(common_root, "molevol_scripts", "R", "assign_job_queue.R") common_root <- Sys.getenv("COMMON_SRC_ROOT") #' Construct list where names (MolEvolvR advanced options) point to processes #' #' @return list where names (MolEvolvR advanced options) point to processes #' -#' example: list_opts2procs <- mapOption2Process +#' example: list_opts2procs <- make_opts2procs #' @export -mapOption2Process <- function() { +make_opts2procs <- function() { tryCatch({ opts2processes <- list( "homology_search" = c("dblast", "dblast_cleanup"), @@ -26,7 +26,7 @@ mapOption2Process <- function() { }, warning = function(w) { message(paste("Warning: ", w$message)) }, finally = { - message("mapOption2Process function execution completed.") + message("make_opts2procs function execution completed.") }) } @@ -40,16 +40,16 @@ mapOption2Process <- function() { #' #' example: #' advanced_opts <- c("homology_search", "domain_architecture") -#' procs <- mapAdvOption2Process(advanced_opts) +#' procs <- map_advanced_opts2procs(advanced_opts) #' @export -mapAdvOption2Process <- function(advanced_opts) { +map_advanced_opts2procs <- function(advanced_opts) { if (!is.character(advanced_opts)) { stop("Argument must be a character vector!") } tryCatch({ # append 'always' to add procs that always run advanced_opts <- c(advanced_opts, "always") - opts2proc <- mapOption2Process() + opts2proc <- make_opts2procs() # setup index for opts2proc based on advanced options idx <- which(names(opts2proc) %in% advanced_opts) # extract processes that will run @@ -60,7 +60,7 @@ mapAdvOption2Process <- function(advanced_opts) { }, warning = function(w) { message(paste("Warning: ", w$message)) }, finally = { - message("mapOption2Process function execution completed.") + message("make_opts2procs function execution completed.") }) } @@ -80,14 +80,14 @@ mapAdvOption2Process <- function(advanced_opts) { #' #' 1) #' dir_job_results <- "/data/scratch/janani/molevolvr_out" -#' list_proc_medians <- calculateProcessRuntime(dir_job_results) +#' list_proc_medians <- get_proc_medians(dir_job_results) #' #' 2) from outside container environment #' common_root <- "/data/molevolvr_transfer/molevolvr_dev" #' dir_job_results <- "/data/molevolvr_transfer/molevolvr_dev/job_results" -#' list_proc_medians <- calculateProcessRuntime(dir_job_results) +#' list_proc_medians <- get_proc_medians(dir_job_results) #' @export -calculateProcessRuntime <- function(dir_job_results) { +get_proc_medians <- function(dir_job_results) { tryCatch({ # Check if dir_job_results is a character string if (!is.character(dir_job_results) || length(dir_job_results) != 1) { @@ -139,7 +139,7 @@ calculateProcessRuntime <- function(dir_job_results) { }, warning = function(w) { message(paste("Warning: ", w$message)) }, finally = { - message("calculateProcessRuntime function execution completed.") + message("get_proc_medians function execution completed.") }) } @@ -156,12 +156,12 @@ calculateProcessRuntime <- function(dir_job_results) { #' #' @return [tbl_df] 2 columns: 1) process and 2) median seconds #' -#' example: writeProcessRuntime2TSV( +#' example: write_proc_medians_table( #' "/data/scratch/janani/molevolvr_out/", #' 
"/data/scratch/janani/molevolvr_out/log_tbl.tsv" #' ) #' @export -writeProcessRuntime2TSV <- function(dir_job_results, filepath) { +write_proc_medians_table <- function(dir_job_results, filepath) { tryCatch({ # Error handling for input arguments if (!is.character(dir_job_results) || length(dir_job_results) != 1) { @@ -175,7 +175,7 @@ writeProcessRuntime2TSV <- function(dir_job_results, filepath) { if (!is.character(filepath) || length(filepath) != 1) { stop("Input 'filepath' must be a single character string.") } - df_proc_medians <- calculateProcessRuntime(dir_job_results) |> + df_proc_medians <- get_proc_medians(dir_job_results) |> tibble::as_tibble() |> tidyr::pivot_longer( dplyr::everything(), @@ -192,7 +192,7 @@ writeProcessRuntime2TSV <- function(dir_job_results, filepath) { }, warning = function(w) { message(paste("Warning: ", w$message)) }, finally = { - message("writeProcessRuntime2TSV function execution completed.") + message("write_proc_medians_table function execution completed.") }) } @@ -201,7 +201,7 @@ writeProcessRuntime2TSV <- function(dir_job_results, filepath) { #' their median runtimes in seconds to the path specified by 'filepath'. #' #' The default value of filepath is the value of the env var -#' MOLEVOLVR_PROC_WEIGHTS, which writeProcessRuntimeToYML() also uses as its default +#' MOLEVOLVR_PROC_WEIGHTS, which write_proc_medians_yml() also uses as its default #' read location. #' #' @param dir_job_results [chr] path to MolEvolvR job_results directory @@ -212,13 +212,13 @@ writeProcessRuntime2TSV <- function(dir_job_results, filepath) { #' #' @examples #' \dontrun{ -#' writeProcessRuntimeToYML( +#' write_proc_medians_yml( #' "/data/scratch/janani/molevolvr_out/", #' "/data/scratch/janani/molevolvr_out/log_tbl.yml" #' ) #' } #' @export -writeProcessRuntimeToYML <- function(dir_job_results, filepath = NULL) { +write_proc_medians_yml <- function(dir_job_results, filepath = NULL) { tryCatch({ # Error handling for dir_job_results arguments if (!is.character(dir_job_results) || length(dir_job_results) != 1) { @@ -238,7 +238,7 @@ writeProcessRuntimeToYML <- function(dir_job_results, filepath = NULL) { stop("Input 'filepath' must be a single character string.") } - medians <- calculateProcessRuntime(dir_job_results) + medians <- get_proc_medians(dir_job_results) yaml::write_yaml(medians, filepath) }, error = function(e) { message(paste("Encountered an error: "), e$message) @@ -261,9 +261,9 @@ writeProcessRuntimeToYML <- function(dir_job_results, filepath = NULL) { #' #' @return [list] names: processes; values: median runtime (seconds) #' -#' example: writeProcessRuntimeToYML() +#' example: write_proc_medians_yml() #' @export -getProcessRuntimeWeights <- function(medians_yml_path = NULL) { +get_proc_weights <- function(medians_yml_path = NULL) { if (is.null(medians_yml_path)) { medians_yml_path <- file.path(common_root, "molevol_scripts", @@ -273,7 +273,7 @@ getProcessRuntimeWeights <- function(medians_yml_path = NULL) { proc_weights <- tryCatch({ # attempt to read the weights from the YAML file produced by - # writeProcessRuntimeToYML() + # write_proc_medians_yml() if (stringr::str_trim(medians_yml_path) == "") { stop( stringr::str_glue("medians_yml_path is empty @@ -285,7 +285,7 @@ getProcessRuntimeWeights <- function(medians_yml_path = NULL) { }, # to avoid fatal errors in reading the proc weights yaml, # some median process runtimes have been hardcoded based on - # the result of calculateProcessRuntime() from Jan 2024 + # the result of get_proc_medians() from Jan 2024 
error = function(cond) { proc_weights <- list( "dblast" = 2810, @@ -306,7 +306,7 @@ getProcessRuntimeWeights <- function(medians_yml_path = NULL) { #' calculate the total estimated walltime for the job #' #' @param advanced_opts character vector of MolEvolvR advanced options -#' (see mapOption2Process for the options) +#' (see make_opts2procs for the options) #' @param n_inputs total number of input proteins #' #' @importFrom dplyr if_else @@ -314,11 +314,11 @@ getProcessRuntimeWeights <- function(medians_yml_path = NULL) { #' #' @return total estimated number of seconds a job will process (walltime) #' -#' example: calculateEstimatedWallTimeFromOptions(c("homology_search", +#' example: advanced_opts2est_walltime (c("homology_search", #' "domain_architecture"), #' n_inputs = 3, n_hits = 50L) #' @export -calculateEstimatedWallTimeFromOptions <- function(advanced_opts, +advanced_opts2est_walltime <- function(advanced_opts, n_inputs = 1L, n_hits = NULL, verbose = FALSE) { @@ -348,7 +348,7 @@ calculateEstimatedWallTimeFromOptions <- function(advanced_opts, } # Get process weights - proc_weights <- writeProcessRuntimeToYML() + proc_weights <- write_proc_medians_yml() if (!is.list(proc_weights)) { stop("Process weights could not be retrieved correctly.") } @@ -357,7 +357,7 @@ calculateEstimatedWallTimeFromOptions <- function(advanced_opts, proc_weights <- proc_weights[order(names(proc_weights))] |> unlist() all_procs <- names(proc_weights) |> sort() # get processes from advanced options and sort by names - procs_from_opts <- mapAdvOption2Process(advanced_opts) + procs_from_opts <- map_advanced_opts2procs(advanced_opts) procs_from_opts <- sort(procs_from_opts) # binary encode: yes proc will run (1); else 0 binary_proc_vec <- dplyr::if_else(all_procs %in% procs_from_opts, 1L, 0L) @@ -366,7 +366,7 @@ calculateEstimatedWallTimeFromOptions <- function(advanced_opts, as.numeric() # calculate the additional processes to run for the homologous hits if ("homology_search" %in% advanced_opts) { - opts2procs <- mapOption2Process() + opts2procs <- make_opts2procs() # exclude the homology search processes for the homologous hits procs2exclude_for_homologs <- opts2procs[["homology_search"]] procs_homologs <- procs_from_opts[!(procs_from_opts @@ -380,7 +380,7 @@ calculateEstimatedWallTimeFromOptions <- function(advanced_opts, } if (verbose) { msg <- stringr::str_glue( - "warnings from calculateEstimatedWallTimeFromOptions():\n", + "warnings from advanced_opts2est_walltime ():\n", "\tn_inputs={n_inputs}\n", "\tn_hits={ifelse(is.null(n_hits), 'null', n_hits)}\n", "\test_walltime={est_walltime}\n\n" @@ -393,7 +393,7 @@ calculateEstimatedWallTimeFromOptions <- function(advanced_opts, }, warning = function(w) { message(paste("Warning: ", w$message)) }, finally = { - message("calculateEstimatedWallTimeFromOptions + message("advanced_opts2est_walltime function execution completed.") }) @@ -403,18 +403,18 @@ calculateEstimatedWallTimeFromOptions <- function(advanced_opts, #' Decision function to assign job queue #' #' @param t_sec_estimate estimated number of seconds a job will process -#' (from calculateEstimatedWallTimeFromOptions()) +#' (from advanced_opts2est_walltime ()) #' @param t_long threshold value that defines the lower bound for assigning a #' job to the "long queue" #' #' @return a string of "short" or "long" #' #' example: -#' calculateEstimatedWallTimeFromOptions(c("homology_search", +#' advanced_opts2est_walltime (c("homology_search", #' "domain_architecture"), 3) |> -#' assignJobQueue() +#' 
assign_job_queue() #' @export -assignJobQueue <- function( +assign_job_queue <- function( t_sec_estimate, t_cutoff = 21600 # 6 hours ) { @@ -434,7 +434,7 @@ assignJobQueue <- function( }, warning = function(w) { message(paste("Warning: ", w$message)) }, finally = { - message("assignJobQueue function execution completed.") + message("assign_job_queue function execution completed.") }) } @@ -451,13 +451,13 @@ assignJobQueue <- function( #' @return line plot object #' #' example: -#' p <- plotEstimatedWallTimes() +#' p <- plot_estimated_walltimes() #' ggplot2::ggsave(filename = "/data/molevolvr_transfer/molevolvr_ #' dev/molevol_scripts/docs/estimate_walltimes.png", plot = p) #' @export -plotEstimatedWallTimes <- function() { +plot_estimated_walltimes <- function() { tryCatch({ - opts <- mapOption2Process() |> names() + opts <- make_opts2procs() |> names() # get all possible submission permutations (powerset) get_powerset <- function(vec) { # generate powerset (do not include empty set) @@ -482,7 +482,7 @@ plotEstimatedWallTimes <- function() { } else { NULL } - est_walltime <- calculateEstimatedWallTimeFromOptions( + est_walltime <- advanced_opts2est_walltime ( advanced_opts, n_inputs = i, n_hits = n_hits, @@ -541,7 +541,7 @@ plotEstimatedWallTimes <- function() { }, warning = function(w) { message(paste("Warning: ", w$message)) }, finally = { - message("plotEstimatedWallTimes function execution completed.") + message("plot_estimated_walltimes function execution completed.") }) } diff --git a/man/calculateEstimatedWallTimeFromOptions.Rd b/man/advanced_opts2est_walltime.Rd similarity index 73% rename from man/calculateEstimatedWallTimeFromOptions.Rd rename to man/advanced_opts2est_walltime.Rd index e4eec3fd..02ae9621 100644 --- a/man/calculateEstimatedWallTimeFromOptions.Rd +++ b/man/advanced_opts2est_walltime.Rd @@ -1,11 +1,11 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/assign_job_queue.R -\name{calculateEstimatedWallTimeFromOptions} -\alias{calculateEstimatedWallTimeFromOptions} +\name{advanced_opts2est_walltime} +\alias{advanced_opts2est_walltime} \title{Given MolEvolvR advanced options and number of inputs, calculate the total estimated walltime for the job} \usage{ -calculateEstimatedWallTimeFromOptions( +advanced_opts2est_walltime( advanced_opts, n_inputs = 1L, n_hits = NULL, @@ -14,14 +14,14 @@ calculateEstimatedWallTimeFromOptions( } \arguments{ \item{advanced_opts}{character vector of MolEvolvR advanced options -(see mapOption2Process for the options)} +(see make_opts2procs for the options)} \item{n_inputs}{total number of input proteins} } \value{ total estimated number of seconds a job will process (walltime) -example: calculateEstimatedWallTimeFromOptions(c("homology_search", +example: advanced_opts2est_walltime (c("homology_search", "domain_architecture"), n_inputs = 3, n_hits = 50L) } diff --git a/man/assignJobQueue.Rd b/man/assign_job_queue.Rd similarity index 68% rename from man/assignJobQueue.Rd rename to man/assign_job_queue.Rd index 27511b6a..d2650fed 100644 --- a/man/assignJobQueue.Rd +++ b/man/assign_job_queue.Rd @@ -1,14 +1,14 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/assign_job_queue.R -\name{assignJobQueue} -\alias{assignJobQueue} +\name{assign_job_queue} +\alias{assign_job_queue} \title{Decision function to assign job queue} \usage{ -assignJobQueue(t_sec_estimate, t_cutoff = 21600) +assign_job_queue(t_sec_estimate, t_cutoff = 21600) } \arguments{ \item{t_sec_estimate}{estimated number of 
seconds a job will process -(from calculateEstimatedWallTimeFromOptions())} +(from advanced_opts2est_walltime ())} \item{t_long}{threshold value that defines the lower bound for assigning a job to the "long queue"} @@ -17,9 +17,9 @@ job to the "long queue"} a string of "short" or "long" example: -calculateEstimatedWallTimeFromOptions(c("homology_search", +advanced_opts2est_walltime (c("homology_search", "domain_architecture"), 3) |> -assignJobQueue() +assign_job_queue() } \description{ Decision function to assign job queue diff --git a/man/calculateProcessRuntime.Rd b/man/get_proc_medians.Rd similarity index 76% rename from man/calculateProcessRuntime.Rd rename to man/get_proc_medians.Rd index bb6dd1ed..b6db0b56 100644 --- a/man/calculateProcessRuntime.Rd +++ b/man/get_proc_medians.Rd @@ -1,10 +1,10 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/assign_job_queue.R -\name{calculateProcessRuntime} -\alias{calculateProcessRuntime} +\name{get_proc_medians} +\alias{get_proc_medians} \title{Scrape MolEvolvR logs and calculate median processes} \usage{ -calculateProcessRuntime(dir_job_results) +get_proc_medians(dir_job_results) } \arguments{ \item{dir_job_results}{\link{chr} path to MolEvolvR job_results @@ -21,12 +21,12 @@ examples: } dir_job_results <- "/data/scratch/janani/molevolvr_out" -list_proc_medians <- calculateProcessRuntime(dir_job_results) +list_proc_medians <- get_proc_medians(dir_job_results) \enumerate{ \item from outside container environment common_root <- "/data/molevolvr_transfer/molevolvr_dev" dir_job_results <- "/data/molevolvr_transfer/molevolvr_dev/job_results" -list_proc_medians <- calculateProcessRuntime(dir_job_results) +list_proc_medians <- get_proc_medians(dir_job_results) } } \description{ diff --git a/man/getProcessRuntimeWeights.Rd b/man/get_proc_weights.Rd similarity index 73% rename from man/getProcessRuntimeWeights.Rd rename to man/get_proc_weights.Rd index 8eff0347..f48585cc 100644 --- a/man/getProcessRuntimeWeights.Rd +++ b/man/get_proc_weights.Rd @@ -1,10 +1,10 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/assign_job_queue.R -\name{getProcessRuntimeWeights} -\alias{getProcessRuntimeWeights} +\name{get_proc_weights} +\alias{get_proc_weights} \title{Quickly get the runtime weights for MolEvolvR backend processes} \usage{ -getProcessRuntimeWeights(medians_yml_path = NULL) +get_proc_weights(medians_yml_path = NULL) } \arguments{ \item{dir_job_results}{\link{chr} path to MolEvolvR job_results @@ -13,7 +13,7 @@ directory} \value{ \link{list} names: processes; values: median runtime (seconds) -example: writeProcessRuntimeToYML() +example: write_proc_medians_yml() } \description{ Quickly get the runtime weights for MolEvolvR backend processes diff --git a/man/mapOption2Process.Rd b/man/make_opts2procs.Rd similarity index 75% rename from man/mapOption2Process.Rd rename to man/make_opts2procs.Rd index ff6905c5..07e208b2 100644 --- a/man/mapOption2Process.Rd +++ b/man/make_opts2procs.Rd @@ -1,15 +1,15 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/assign_job_queue.R -\name{mapOption2Process} -\alias{mapOption2Process} +\name{make_opts2procs} +\alias{make_opts2procs} \title{Construct list where names (MolEvolvR advanced options) point to processes} \usage{ -mapOption2Process() +make_opts2procs() } \value{ list where names (MolEvolvR advanced options) point to processes -example: list_opts2procs <- mapOption2Process +example: list_opts2procs <- make_opts2procs } 
\description{ Construct list where names (MolEvolvR advanced options) point to processes diff --git a/man/mapAdvOption2Process.Rd b/man/map_advanced_opts2procs.Rd similarity index 76% rename from man/mapAdvOption2Process.Rd rename to man/map_advanced_opts2procs.Rd index 5bd9ee65..631708b4 100644 --- a/man/mapAdvOption2Process.Rd +++ b/man/map_advanced_opts2procs.Rd @@ -1,10 +1,10 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/assign_job_queue.R -\name{mapAdvOption2Process} -\alias{mapAdvOption2Process} +\name{map_advanced_opts2procs} +\alias{map_advanced_opts2procs} \title{Use MolEvolvR advanced options to get associated processes} \usage{ -mapAdvOption2Process(advanced_opts) +map_advanced_opts2procs(advanced_opts) } \arguments{ \item{advanced_opts}{character vector of MolEvolvR advanced options} @@ -15,7 +15,7 @@ the advanced options example: advanced_opts <- c("homology_search", "domain_architecture") -procs <- mapAdvOption2Process(advanced_opts) +procs <- map_advanced_opts2procs(advanced_opts) } \description{ Use MolEvolvR advanced options to get associated processes diff --git a/man/plotEstimatedWallTimes.Rd b/man/plot_estimated_walltimes.Rd similarity index 77% rename from man/plotEstimatedWallTimes.Rd rename to man/plot_estimated_walltimes.Rd index 0d53cb32..884fed50 100644 --- a/man/plotEstimatedWallTimes.Rd +++ b/man/plot_estimated_walltimes.Rd @@ -1,17 +1,17 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/assign_job_queue.R -\name{plotEstimatedWallTimes} -\alias{plotEstimatedWallTimes} +\name{plot_estimated_walltimes} +\alias{plot_estimated_walltimes} \title{Plot the estimated runtimes for different advanced options and number of inputs} \usage{ -plotEstimatedWallTimes() +plot_estimated_walltimes() } \value{ line plot object example: -p <- plotEstimatedWallTimes() +p <- plot_estimated_walltimes() ggplot2::ggsave(filename = "/data/molevolvr_transfer/molevolvr_ dev/molevol_scripts/docs/estimate_walltimes.png", plot = p) } diff --git a/man/writeProcessRuntime2TSV.Rd b/man/write_proc_medians_table.Rd similarity index 77% rename from man/writeProcessRuntime2TSV.Rd rename to man/write_proc_medians_table.Rd index 03cbbd68..2ae7a97b 100644 --- a/man/writeProcessRuntime2TSV.Rd +++ b/man/write_proc_medians_table.Rd @@ -1,10 +1,10 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/assign_job_queue.R -\name{writeProcessRuntime2TSV} -\alias{writeProcessRuntime2TSV} +\name{write_proc_medians_table} +\alias{write_proc_medians_table} \title{Write a table of 2 columns: 1) process and 2) median seconds} \usage{ -writeProcessRuntime2TSV(dir_job_results, filepath) +write_proc_medians_table(dir_job_results, filepath) } \arguments{ \item{dir_job_results}{\link{chr} path to MolEvolvR job_results} @@ -14,7 +14,7 @@ writeProcessRuntime2TSV(dir_job_results, filepath) \value{ \link{tbl_df} 2 columns: 1) process and 2) median seconds -example: writeProcessRuntime2TSV( +example: write_proc_medians_table( "/data/scratch/janani/molevolvr_out/", "/data/scratch/janani/molevolvr_out/log_tbl.tsv" ) diff --git a/man/writeProcessRuntimeToYML.Rd b/man/write_proc_medians_yml.Rd similarity index 74% rename from man/writeProcessRuntimeToYML.Rd rename to man/write_proc_medians_yml.Rd index e4a5c8ad..74757f1f 100644 --- a/man/writeProcessRuntimeToYML.Rd +++ b/man/write_proc_medians_yml.Rd @@ -1,11 +1,11 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/assign_job_queue.R 
-\name{writeProcessRuntimeToYML} -\alias{writeProcessRuntimeToYML} +\name{write_proc_medians_yml} +\alias{write_proc_medians_yml} \title{Compute median process runtimes, then write a YAML list of the processes and their median runtimes in seconds to the path specified by 'filepath'.} \usage{ -writeProcessRuntimeToYML(dir_job_results, filepath = NULL) +write_proc_medians_yml(dir_job_results, filepath = NULL) } \arguments{ \item{dir_job_results}{\link{chr} path to MolEvolvR job_results directory} @@ -15,12 +15,12 @@ uses ./molevol_scripts/log_data/job_proc_weights.yml} } \description{ The default value of filepath is the value of the env var -MOLEVOLVR_PROC_WEIGHTS, which writeProcessRuntimeToYML() also uses as its default +MOLEVOLVR_PROC_WEIGHTS, which write_proc_medians_yml() also uses as its default read location. } \examples{ \dontrun{ -writeProcessRuntimeToYML( +write_proc_medians_yml( "/data/scratch/janani/molevolvr_out/", "/data/scratch/janani/molevolvr_out/log_tbl.yml" ) From a543898c8579065cbe3125f40b8cdf66200fc06f Mon Sep 17 00:00:00 2001 From: Seyi Kuforiji Date: Thu, 10 Oct 2024 11:00:41 +0100 Subject: [PATCH 24/41] Renamed the following functions in R/assign_job_queue.R; MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit | Original | Modified | User Facing | |---------------------------------|----------------------------------|----------------------------------| | assign_job_queue | assignJobQueue | ✔️ | | make_opts2procs | mapOption2Process | ✔️ | | map_advanced_opts2procs | mapAdvOption2Process | ✔️ | | get_proc_medians | calculateProcessRuntime | ✔️ | | write_proc_medians_table | writeProcessRuntime2TSV | ✔️ | | write_proc_medians_yml | writeProcessRuntime2YML | ✔️ | | get_proc_weights | getProcessRuntimeWeights | ✔️ | | advanced_opts2est_walltime | calculateEstimatedWallTimeFromOpts| ✔️ | | plot_estimated_walltimes | plotEstimatedWallTimes | ✔️ | --- NAMESPACE | 18 ++-- R/assign_job_queue.R | 86 +++++++++---------- ...{assign_job_queue.Rd => assignJobQueue.Rd} | 12 +-- ... 
=> calculateEstimatedWallTimeFromOpts.Rd} | 10 +-- ..._medians.Rd => calculateProcessRuntime.Rd} | 10 +-- ...weights.Rd => getProcessRuntimeWeights.Rd} | 8 +- ..._opts2procs.Rd => mapAdvOption2Process.Rd} | 8 +- ...ake_opts2procs.Rd => mapOption2Process.Rd} | 8 +- ...walltimes.Rd => plotEstimatedWallTimes.Rd} | 8 +- ...ns_table.Rd => writeProcessRuntime2TSV.Rd} | 8 +- ...ians_yml.Rd => writeProcessRuntime2YML.Rd} | 10 +-- 11 files changed, 93 insertions(+), 93 deletions(-) rename man/{assign_job_queue.Rd => assignJobQueue.Rd} (68%) rename man/{advanced_opts2est_walltime.Rd => calculateEstimatedWallTimeFromOpts.Rd} (74%) rename man/{get_proc_medians.Rd => calculateProcessRuntime.Rd} (76%) rename man/{get_proc_weights.Rd => getProcessRuntimeWeights.Rd} (73%) rename man/{map_advanced_opts2procs.Rd => mapAdvOption2Process.Rd} (76%) rename man/{make_opts2procs.Rd => mapOption2Process.Rd} (75%) rename man/{plot_estimated_walltimes.Rd => plotEstimatedWallTimes.Rd} (77%) rename man/{write_proc_medians_table.Rd => writeProcessRuntime2TSV.Rd} (77%) rename man/{write_proc_medians_yml.Rd => writeProcessRuntime2YML.Rd} (74%) diff --git a/NAMESPACE b/NAMESPACE index c811bac3..65cc791e 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -15,10 +15,11 @@ export(add_leaves) export(add_lins) export(add_name) export(add_tax) -export(advanced_opts2est_walltime) export(alignFasta) export(assert_count_df) -export(assign_job_queue) +export(assignJobQueue) +export(calculateEstimatedWallTimeFromOpts) +export(calculateProcessRuntime) export(cleanClusters) export(cleanDomainArchitecture) export(cleanGeneDescription) @@ -53,10 +54,9 @@ export(generate_all_aln2fa) export(generate_fa2tre) export(generate_msa) export(generate_trees) +export(getProcessRuntimeWeights) export(get_accnums_from_fasta_file) export(get_job_message) -export(get_proc_medians) -export(get_proc_weights) export(ipg2lin) export(ipr2viz) export(ipr2viz_web) @@ -66,12 +66,12 @@ export(lineage.domain_repeats.plot) export(lineage.neighbors.plot) export(lineage_sunburst) export(make_job_results_url) -export(make_opts2procs) export(mapAcc2Name) +export(mapAdvOption2Process) +export(mapOption2Process) export(map_acc2name) -export(map_advanced_opts2procs) export(msa_pdf) -export(plot_estimated_walltimes) +export(plotEstimatedWallTimes) export(prot2tax) export(prot2tax_old) export(removeAsterisks) @@ -103,8 +103,8 @@ export(wordcloud2_element) export(wordcloud3) export(wordcloud_element) export(write.MsaAAMultipleAlignment) -export(write_proc_medians_table) -export(write_proc_medians_yml) +export(writeProcessRuntime2TSV) +export(writeProcessRuntime2YML) importFrom(Biostrings,AAStringSet) importFrom(Biostrings,readAAStringSet) importFrom(Biostrings,toString) diff --git a/R/assign_job_queue.R b/R/assign_job_queue.R index c531fb09..10df1e3a 100644 --- a/R/assign_job_queue.R +++ b/R/assign_job_queue.R @@ -3,16 +3,16 @@ # pipeline. 
# to use this, construct paths like so: file.path(common_root, "path", "to", "file.R") # for example, the reference for this file would be: -# file.path(common_root, "molevol_scripts", "R", "assign_job_queue.R") +# file.path(common_root, "molevol_scripts", "R", "assignJobQueue.R") common_root <- Sys.getenv("COMMON_SRC_ROOT") #' Construct list where names (MolEvolvR advanced options) point to processes #' #' @return list where names (MolEvolvR advanced options) point to processes #' -#' example: list_opts2procs <- make_opts2procs +#' example: list_opts2procs <- mapOption2Process #' @export -make_opts2procs <- function() { +mapOption2Process <- function() { tryCatch({ opts2processes <- list( "homology_search" = c("dblast", "dblast_cleanup"), @@ -26,7 +26,7 @@ make_opts2procs <- function() { }, warning = function(w) { message(paste("Warning: ", w$message)) }, finally = { - message("make_opts2procs function execution completed.") + message("mapOption2Process function execution completed.") }) } @@ -40,16 +40,16 @@ make_opts2procs <- function() { #' #' example: #' advanced_opts <- c("homology_search", "domain_architecture") -#' procs <- map_advanced_opts2procs(advanced_opts) +#' procs <- mapAdvOption2Process(advanced_opts) #' @export -map_advanced_opts2procs <- function(advanced_opts) { +mapAdvOption2Process <- function(advanced_opts) { if (!is.character(advanced_opts)) { stop("Argument must be a character vector!") } tryCatch({ # append 'always' to add procs that always run advanced_opts <- c(advanced_opts, "always") - opts2proc <- make_opts2procs() + opts2proc <- mapOption2Process() # setup index for opts2proc based on advanced options idx <- which(names(opts2proc) %in% advanced_opts) # extract processes that will run @@ -60,7 +60,7 @@ map_advanced_opts2procs <- function(advanced_opts) { }, warning = function(w) { message(paste("Warning: ", w$message)) }, finally = { - message("make_opts2procs function execution completed.") + message("mapOption2Process function execution completed.") }) } @@ -80,14 +80,14 @@ map_advanced_opts2procs <- function(advanced_opts) { #' #' 1) #' dir_job_results <- "/data/scratch/janani/molevolvr_out" -#' list_proc_medians <- get_proc_medians(dir_job_results) +#' list_proc_medians <- calculateProcessRuntime(dir_job_results) #' #' 2) from outside container environment #' common_root <- "/data/molevolvr_transfer/molevolvr_dev" #' dir_job_results <- "/data/molevolvr_transfer/molevolvr_dev/job_results" -#' list_proc_medians <- get_proc_medians(dir_job_results) +#' list_proc_medians <- calculateProcessRuntime(dir_job_results) #' @export -get_proc_medians <- function(dir_job_results) { +calculateProcessRuntime <- function(dir_job_results) { tryCatch({ # Check if dir_job_results is a character string if (!is.character(dir_job_results) || length(dir_job_results) != 1) { @@ -139,7 +139,7 @@ get_proc_medians <- function(dir_job_results) { }, warning = function(w) { message(paste("Warning: ", w$message)) }, finally = { - message("get_proc_medians function execution completed.") + message("calculateProcessRuntime function execution completed.") }) } @@ -156,12 +156,12 @@ get_proc_medians <- function(dir_job_results) { #' #' @return [tbl_df] 2 columns: 1) process and 2) median seconds #' -#' example: write_proc_medians_table( +#' example: writeProcessRuntime2TSV( #' "/data/scratch/janani/molevolvr_out/", #' "/data/scratch/janani/molevolvr_out/log_tbl.tsv" #' ) #' @export -write_proc_medians_table <- function(dir_job_results, filepath) { +writeProcessRuntime2TSV <- 
function(dir_job_results, filepath) { tryCatch({ # Error handling for input arguments if (!is.character(dir_job_results) || length(dir_job_results) != 1) { @@ -175,7 +175,7 @@ write_proc_medians_table <- function(dir_job_results, filepath) { if (!is.character(filepath) || length(filepath) != 1) { stop("Input 'filepath' must be a single character string.") } - df_proc_medians <- get_proc_medians(dir_job_results) |> + df_proc_medians <- calculateProcessRuntime(dir_job_results) |> tibble::as_tibble() |> tidyr::pivot_longer( dplyr::everything(), @@ -192,7 +192,7 @@ write_proc_medians_table <- function(dir_job_results, filepath) { }, warning = function(w) { message(paste("Warning: ", w$message)) }, finally = { - message("write_proc_medians_table function execution completed.") + message("writeProcessRuntime2TSV function execution completed.") }) } @@ -201,7 +201,7 @@ write_proc_medians_table <- function(dir_job_results, filepath) { #' their median runtimes in seconds to the path specified by 'filepath'. #' #' The default value of filepath is the value of the env var -#' MOLEVOLVR_PROC_WEIGHTS, which write_proc_medians_yml() also uses as its default +#' MOLEVOLVR_PROC_WEIGHTS, which writeProcessRuntime2YML() also uses as its default #' read location. #' #' @param dir_job_results [chr] path to MolEvolvR job_results directory @@ -212,13 +212,13 @@ write_proc_medians_table <- function(dir_job_results, filepath) { #' #' @examples #' \dontrun{ -#' write_proc_medians_yml( +#' writeProcessRuntime2YML( #' "/data/scratch/janani/molevolvr_out/", #' "/data/scratch/janani/molevolvr_out/log_tbl.yml" #' ) #' } #' @export -write_proc_medians_yml <- function(dir_job_results, filepath = NULL) { +writeProcessRuntime2YML <- function(dir_job_results, filepath = NULL) { tryCatch({ # Error handling for dir_job_results arguments if (!is.character(dir_job_results) || length(dir_job_results) != 1) { @@ -238,14 +238,14 @@ write_proc_medians_yml <- function(dir_job_results, filepath = NULL) { stop("Input 'filepath' must be a single character string.") } - medians <- get_proc_medians(dir_job_results) + medians <- calculateProcessRuntime(dir_job_results) yaml::write_yaml(medians, filepath) }, error = function(e) { message(paste("Encountered an error: "), e$message) }, warning = function(w) { message(paste("Warning: "), w$message) }, finally = { - message("write_proc_medians_table function execution completed.") + message("writeProcessRuntime2TSV function execution completed.") } ) @@ -261,9 +261,9 @@ write_proc_medians_yml <- function(dir_job_results, filepath = NULL) { #' #' @return [list] names: processes; values: median runtime (seconds) #' -#' example: write_proc_medians_yml() +#' example: writeProcessRuntime2YML() #' @export -get_proc_weights <- function(medians_yml_path = NULL) { +getProcessRuntimeWeights <- function(medians_yml_path = NULL) { if (is.null(medians_yml_path)) { medians_yml_path <- file.path(common_root, "molevol_scripts", @@ -273,7 +273,7 @@ get_proc_weights <- function(medians_yml_path = NULL) { proc_weights <- tryCatch({ # attempt to read the weights from the YAML file produced by - # write_proc_medians_yml() + # writeProcessRuntime2YML() if (stringr::str_trim(medians_yml_path) == "") { stop( stringr::str_glue("medians_yml_path is empty @@ -285,7 +285,7 @@ get_proc_weights <- function(medians_yml_path = NULL) { }, # to avoid fatal errors in reading the proc weights yaml, # some median process runtimes have been hardcoded based on - # the result of get_proc_medians() from Jan 2024 + # the result of 
calculateProcessRuntime() from Jan 2024 error = function(cond) { proc_weights <- list( "dblast" = 2810, @@ -306,7 +306,7 @@ get_proc_weights <- function(medians_yml_path = NULL) { #' calculate the total estimated walltime for the job #' #' @param advanced_opts character vector of MolEvolvR advanced options -#' (see make_opts2procs for the options) +#' (see mapOption2Process for the options) #' @param n_inputs total number of input proteins #' #' @importFrom dplyr if_else @@ -314,11 +314,11 @@ get_proc_weights <- function(medians_yml_path = NULL) { #' #' @return total estimated number of seconds a job will process (walltime) #' -#' example: advanced_opts2est_walltime (c("homology_search", +#' example: calculateEstimatedWallTimeFromOpts (c("homology_search", #' "domain_architecture"), #' n_inputs = 3, n_hits = 50L) #' @export -advanced_opts2est_walltime <- function(advanced_opts, +calculateEstimatedWallTimeFromOpts <- function(advanced_opts, n_inputs = 1L, n_hits = NULL, verbose = FALSE) { @@ -348,7 +348,7 @@ advanced_opts2est_walltime <- function(advanced_opts, } # Get process weights - proc_weights <- write_proc_medians_yml() + proc_weights <- writeProcessRuntime2YML() if (!is.list(proc_weights)) { stop("Process weights could not be retrieved correctly.") } @@ -357,7 +357,7 @@ advanced_opts2est_walltime <- function(advanced_opts, proc_weights <- proc_weights[order(names(proc_weights))] |> unlist() all_procs <- names(proc_weights) |> sort() # get processes from advanced options and sort by names - procs_from_opts <- map_advanced_opts2procs(advanced_opts) + procs_from_opts <- mapAdvOption2Process(advanced_opts) procs_from_opts <- sort(procs_from_opts) # binary encode: yes proc will run (1); else 0 binary_proc_vec <- dplyr::if_else(all_procs %in% procs_from_opts, 1L, 0L) @@ -366,7 +366,7 @@ advanced_opts2est_walltime <- function(advanced_opts, as.numeric() # calculate the additional processes to run for the homologous hits if ("homology_search" %in% advanced_opts) { - opts2procs <- make_opts2procs() + opts2procs <- mapOption2Process() # exclude the homology search processes for the homologous hits procs2exclude_for_homologs <- opts2procs[["homology_search"]] procs_homologs <- procs_from_opts[!(procs_from_opts @@ -380,7 +380,7 @@ advanced_opts2est_walltime <- function(advanced_opts, } if (verbose) { msg <- stringr::str_glue( - "warnings from advanced_opts2est_walltime ():\n", + "warnings from calculateEstimatedWallTimeFromOpts ():\n", "\tn_inputs={n_inputs}\n", "\tn_hits={ifelse(is.null(n_hits), 'null', n_hits)}\n", "\test_walltime={est_walltime}\n\n" @@ -393,7 +393,7 @@ advanced_opts2est_walltime <- function(advanced_opts, }, warning = function(w) { message(paste("Warning: ", w$message)) }, finally = { - message("advanced_opts2est_walltime + message("calculateEstimatedWallTimeFromOpts function execution completed.") }) @@ -403,18 +403,18 @@ advanced_opts2est_walltime <- function(advanced_opts, #' Decision function to assign job queue #' #' @param t_sec_estimate estimated number of seconds a job will process -#' (from advanced_opts2est_walltime ()) +#' (from calculateEstimatedWallTimeFromOpts ()) #' @param t_long threshold value that defines the lower bound for assigning a #' job to the "long queue" #' #' @return a string of "short" or "long" #' #' example: -#' advanced_opts2est_walltime (c("homology_search", +#' calculateEstimatedWallTimeFromOpts (c("homology_search", #' "domain_architecture"), 3) |> -#' assign_job_queue() +#' assignJobQueue() #' @export -assign_job_queue <- function( 
+assignJobQueue <- function( t_sec_estimate, t_cutoff = 21600 # 6 hours ) { @@ -434,7 +434,7 @@ assign_job_queue <- function( }, warning = function(w) { message(paste("Warning: ", w$message)) }, finally = { - message("assign_job_queue function execution completed.") + message("assignJobQueue function execution completed.") }) } @@ -451,13 +451,13 @@ assign_job_queue <- function( #' @return line plot object #' #' example: -#' p <- plot_estimated_walltimes() +#' p <- plotEstimatedWallTimes() #' ggplot2::ggsave(filename = "/data/molevolvr_transfer/molevolvr_ #' dev/molevol_scripts/docs/estimate_walltimes.png", plot = p) #' @export -plot_estimated_walltimes <- function() { +plotEstimatedWallTimes <- function() { tryCatch({ - opts <- make_opts2procs() |> names() + opts <- mapOption2Process() |> names() # get all possible submission permutations (powerset) get_powerset <- function(vec) { # generate powerset (do not include empty set) @@ -482,7 +482,7 @@ plot_estimated_walltimes <- function() { } else { NULL } - est_walltime <- advanced_opts2est_walltime ( + est_walltime <- calculateEstimatedWallTimeFromOpts ( advanced_opts, n_inputs = i, n_hits = n_hits, @@ -541,7 +541,7 @@ plot_estimated_walltimes <- function() { }, warning = function(w) { message(paste("Warning: ", w$message)) }, finally = { - message("plot_estimated_walltimes function execution completed.") + message("plotEstimatedWallTimes function execution completed.") }) } diff --git a/man/assign_job_queue.Rd b/man/assignJobQueue.Rd similarity index 68% rename from man/assign_job_queue.Rd rename to man/assignJobQueue.Rd index d2650fed..3663ce56 100644 --- a/man/assign_job_queue.Rd +++ b/man/assignJobQueue.Rd @@ -1,14 +1,14 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/assign_job_queue.R -\name{assign_job_queue} -\alias{assign_job_queue} +\name{assignJobQueue} +\alias{assignJobQueue} \title{Decision function to assign job queue} \usage{ -assign_job_queue(t_sec_estimate, t_cutoff = 21600) +assignJobQueue(t_sec_estimate, t_cutoff = 21600) } \arguments{ \item{t_sec_estimate}{estimated number of seconds a job will process -(from advanced_opts2est_walltime ())} +(from calculateEstimatedWallTimeFromOpts ())} \item{t_long}{threshold value that defines the lower bound for assigning a job to the "long queue"} @@ -17,9 +17,9 @@ job to the "long queue"} a string of "short" or "long" example: -advanced_opts2est_walltime (c("homology_search", +calculateEstimatedWallTimeFromOpts (c("homology_search", "domain_architecture"), 3) |> -assign_job_queue() +assignJobQueue() } \description{ Decision function to assign job queue diff --git a/man/advanced_opts2est_walltime.Rd b/man/calculateEstimatedWallTimeFromOpts.Rd similarity index 74% rename from man/advanced_opts2est_walltime.Rd rename to man/calculateEstimatedWallTimeFromOpts.Rd index 02ae9621..c09cf6a6 100644 --- a/man/advanced_opts2est_walltime.Rd +++ b/man/calculateEstimatedWallTimeFromOpts.Rd @@ -1,11 +1,11 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/assign_job_queue.R -\name{advanced_opts2est_walltime} -\alias{advanced_opts2est_walltime} +\name{calculateEstimatedWallTimeFromOpts} +\alias{calculateEstimatedWallTimeFromOpts} \title{Given MolEvolvR advanced options and number of inputs, calculate the total estimated walltime for the job} \usage{ -advanced_opts2est_walltime( +calculateEstimatedWallTimeFromOpts( advanced_opts, n_inputs = 1L, n_hits = NULL, @@ -14,14 +14,14 @@ advanced_opts2est_walltime( } \arguments{ 
\item{advanced_opts}{character vector of MolEvolvR advanced options -(see make_opts2procs for the options)} +(see mapOption2Process for the options)} \item{n_inputs}{total number of input proteins} } \value{ total estimated number of seconds a job will process (walltime) -example: advanced_opts2est_walltime (c("homology_search", +example: calculateEstimatedWallTimeFromOpts (c("homology_search", "domain_architecture"), n_inputs = 3, n_hits = 50L) } diff --git a/man/get_proc_medians.Rd b/man/calculateProcessRuntime.Rd similarity index 76% rename from man/get_proc_medians.Rd rename to man/calculateProcessRuntime.Rd index b6db0b56..bb6dd1ed 100644 --- a/man/get_proc_medians.Rd +++ b/man/calculateProcessRuntime.Rd @@ -1,10 +1,10 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/assign_job_queue.R -\name{get_proc_medians} -\alias{get_proc_medians} +\name{calculateProcessRuntime} +\alias{calculateProcessRuntime} \title{Scrape MolEvolvR logs and calculate median processes} \usage{ -get_proc_medians(dir_job_results) +calculateProcessRuntime(dir_job_results) } \arguments{ \item{dir_job_results}{\link{chr} path to MolEvolvR job_results @@ -21,12 +21,12 @@ examples: } dir_job_results <- "/data/scratch/janani/molevolvr_out" -list_proc_medians <- get_proc_medians(dir_job_results) +list_proc_medians <- calculateProcessRuntime(dir_job_results) \enumerate{ \item from outside container environment common_root <- "/data/molevolvr_transfer/molevolvr_dev" dir_job_results <- "/data/molevolvr_transfer/molevolvr_dev/job_results" -list_proc_medians <- get_proc_medians(dir_job_results) +list_proc_medians <- calculateProcessRuntime(dir_job_results) } } \description{ diff --git a/man/get_proc_weights.Rd b/man/getProcessRuntimeWeights.Rd similarity index 73% rename from man/get_proc_weights.Rd rename to man/getProcessRuntimeWeights.Rd index f48585cc..ff3c8e5d 100644 --- a/man/get_proc_weights.Rd +++ b/man/getProcessRuntimeWeights.Rd @@ -1,10 +1,10 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/assign_job_queue.R -\name{get_proc_weights} -\alias{get_proc_weights} +\name{getProcessRuntimeWeights} +\alias{getProcessRuntimeWeights} \title{Quickly get the runtime weights for MolEvolvR backend processes} \usage{ -get_proc_weights(medians_yml_path = NULL) +getProcessRuntimeWeights(medians_yml_path = NULL) } \arguments{ \item{dir_job_results}{\link{chr} path to MolEvolvR job_results @@ -13,7 +13,7 @@ directory} \value{ \link{list} names: processes; values: median runtime (seconds) -example: write_proc_medians_yml() +example: writeProcessRuntime2YML() } \description{ Quickly get the runtime weights for MolEvolvR backend processes diff --git a/man/map_advanced_opts2procs.Rd b/man/mapAdvOption2Process.Rd similarity index 76% rename from man/map_advanced_opts2procs.Rd rename to man/mapAdvOption2Process.Rd index 631708b4..5bd9ee65 100644 --- a/man/map_advanced_opts2procs.Rd +++ b/man/mapAdvOption2Process.Rd @@ -1,10 +1,10 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/assign_job_queue.R -\name{map_advanced_opts2procs} -\alias{map_advanced_opts2procs} +\name{mapAdvOption2Process} +\alias{mapAdvOption2Process} \title{Use MolEvolvR advanced options to get associated processes} \usage{ -map_advanced_opts2procs(advanced_opts) +mapAdvOption2Process(advanced_opts) } \arguments{ \item{advanced_opts}{character vector of MolEvolvR advanced options} @@ -15,7 +15,7 @@ the advanced options example: advanced_opts <- c("homology_search", 
"domain_architecture") -procs <- map_advanced_opts2procs(advanced_opts) +procs <- mapAdvOption2Process(advanced_opts) } \description{ Use MolEvolvR advanced options to get associated processes diff --git a/man/make_opts2procs.Rd b/man/mapOption2Process.Rd similarity index 75% rename from man/make_opts2procs.Rd rename to man/mapOption2Process.Rd index 07e208b2..ff6905c5 100644 --- a/man/make_opts2procs.Rd +++ b/man/mapOption2Process.Rd @@ -1,15 +1,15 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/assign_job_queue.R -\name{make_opts2procs} -\alias{make_opts2procs} +\name{mapOption2Process} +\alias{mapOption2Process} \title{Construct list where names (MolEvolvR advanced options) point to processes} \usage{ -make_opts2procs() +mapOption2Process() } \value{ list where names (MolEvolvR advanced options) point to processes -example: list_opts2procs <- make_opts2procs +example: list_opts2procs <- mapOption2Process } \description{ Construct list where names (MolEvolvR advanced options) point to processes diff --git a/man/plot_estimated_walltimes.Rd b/man/plotEstimatedWallTimes.Rd similarity index 77% rename from man/plot_estimated_walltimes.Rd rename to man/plotEstimatedWallTimes.Rd index 884fed50..0d53cb32 100644 --- a/man/plot_estimated_walltimes.Rd +++ b/man/plotEstimatedWallTimes.Rd @@ -1,17 +1,17 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/assign_job_queue.R -\name{plot_estimated_walltimes} -\alias{plot_estimated_walltimes} +\name{plotEstimatedWallTimes} +\alias{plotEstimatedWallTimes} \title{Plot the estimated runtimes for different advanced options and number of inputs} \usage{ -plot_estimated_walltimes() +plotEstimatedWallTimes() } \value{ line plot object example: -p <- plot_estimated_walltimes() +p <- plotEstimatedWallTimes() ggplot2::ggsave(filename = "/data/molevolvr_transfer/molevolvr_ dev/molevol_scripts/docs/estimate_walltimes.png", plot = p) } diff --git a/man/write_proc_medians_table.Rd b/man/writeProcessRuntime2TSV.Rd similarity index 77% rename from man/write_proc_medians_table.Rd rename to man/writeProcessRuntime2TSV.Rd index 2ae7a97b..03cbbd68 100644 --- a/man/write_proc_medians_table.Rd +++ b/man/writeProcessRuntime2TSV.Rd @@ -1,10 +1,10 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/assign_job_queue.R -\name{write_proc_medians_table} -\alias{write_proc_medians_table} +\name{writeProcessRuntime2TSV} +\alias{writeProcessRuntime2TSV} \title{Write a table of 2 columns: 1) process and 2) median seconds} \usage{ -write_proc_medians_table(dir_job_results, filepath) +writeProcessRuntime2TSV(dir_job_results, filepath) } \arguments{ \item{dir_job_results}{\link{chr} path to MolEvolvR job_results} @@ -14,7 +14,7 @@ write_proc_medians_table(dir_job_results, filepath) \value{ \link{tbl_df} 2 columns: 1) process and 2) median seconds -example: write_proc_medians_table( +example: writeProcessRuntime2TSV( "/data/scratch/janani/molevolvr_out/", "/data/scratch/janani/molevolvr_out/log_tbl.tsv" ) diff --git a/man/write_proc_medians_yml.Rd b/man/writeProcessRuntime2YML.Rd similarity index 74% rename from man/write_proc_medians_yml.Rd rename to man/writeProcessRuntime2YML.Rd index 74757f1f..b43f39ee 100644 --- a/man/write_proc_medians_yml.Rd +++ b/man/writeProcessRuntime2YML.Rd @@ -1,11 +1,11 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/assign_job_queue.R -\name{write_proc_medians_yml} -\alias{write_proc_medians_yml} +\name{writeProcessRuntime2YML} 
+\alias{writeProcessRuntime2YML} \title{Compute median process runtimes, then write a YAML list of the processes and their median runtimes in seconds to the path specified by 'filepath'.} \usage{ -write_proc_medians_yml(dir_job_results, filepath = NULL) +writeProcessRuntime2YML(dir_job_results, filepath = NULL) } \arguments{ \item{dir_job_results}{\link{chr} path to MolEvolvR job_results directory} @@ -15,12 +15,12 @@ uses ./molevol_scripts/log_data/job_proc_weights.yml} } \description{ The default value of filepath is the value of the env var -MOLEVOLVR_PROC_WEIGHTS, which write_proc_medians_yml() also uses as its default +MOLEVOLVR_PROC_WEIGHTS, which writeProcessRuntime2YML() also uses as its default read location. } \examples{ \dontrun{ -write_proc_medians_yml( +writeProcessRuntime2YML( "/data/scratch/janani/molevolvr_out/", "/data/scratch/janani/molevolvr_out/log_tbl.yml" ) From 2da3d1a1eadb1c3d6f140700444e15db46c341d2 Mon Sep 17 00:00:00 2001 From: David Mayer Date: Fri, 11 Oct 2024 08:40:17 -0600 Subject: [PATCH 25/41] summarize.R adjustments - add back importFrom n_distinct() as it appears to be used by summarizeGenContext() - use function call as title -- may specify this in MolEvolvR style guide for consistency - adjust Rd grouping with MolEvolvR_summary @rdname tag for functions that had a clear summary element. This will hopefully avoid confusion with the rather ubiquitous dplyr::summarize - converted some code comments to placeholder descriptions --- NAMESPACE | 1 + R/summarize.R | 58 +++---- man/{summarize.Rd => MolEvolvR_summary.Rd} | 159 ++++---------------- man/countbycolumn.Rd | 26 +++- man/elements2Words.Rd | 25 ++- man/filterbydomains.Rd | 2 +- man/filterbyfrequency.Rd | 14 +- man/findparalogs.Rd | 2 +- man/summarizeDomArch.Rd | 22 --- man/summarizeDomArch_ByLineage.Rd | 22 --- man/summarizeGenContext.Rd | 22 --- man/summarizeGenContext_ByDomArchLineage.Rd | 22 --- man/summarizeGenContext_ByLineage.Rd | 22 --- man/summarizebylineage.Rd | 25 --- man/totalgencontextordomarchcounts.Rd | 42 ------ man/words2wordcounts.Rd | 13 +- 16 files changed, 122 insertions(+), 355 deletions(-) rename man/{summarize.Rd => MolEvolvR_summary.Rd} (52%) delete mode 100644 man/summarizeDomArch.Rd delete mode 100644 man/summarizeDomArch_ByLineage.Rd delete mode 100644 man/summarizeGenContext.Rd delete mode 100644 man/summarizeGenContext_ByDomArchLineage.Rd delete mode 100644 man/summarizeGenContext_ByLineage.Rd delete mode 100644 man/summarizebylineage.Rd delete mode 100644 man/totalgencontextordomarchcounts.Rd diff --git a/NAMESPACE b/NAMESPACE index 2326fc1f..53332439 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -139,6 +139,7 @@ importFrom(dplyr,if_else) importFrom(dplyr,left_join) importFrom(dplyr,mutate) importFrom(dplyr,n) +importFrom(dplyr,n_distinct) importFrom(dplyr,pull) importFrom(dplyr,relocate) importFrom(dplyr,right_join) diff --git a/R/summarize.R b/R/summarize.R index 321a0488..2816f174 100644 --- a/R/summarize.R +++ b/R/summarize.R @@ -10,7 +10,7 @@ # suppressPackageStartupMessages(library(rlang)) # conflicted::conflict_prefer("filter", "dplyr") -#' Filter by Domains +#' filterByDomains #' #' @author Samuel Chen, Janani Ravi #' @description filterByDomains filters a data frame by identifying exact domain matches @@ -29,7 +29,6 @@ #' #' @return Filtered data frame #' @note There is no need to make the domains 'regex safe', that will be handled by this function -#' @name summarize #' @export #' #' @examples @@ -89,9 +88,11 @@ filterByDomains <- function(prot, column = "DomArch", 
doms_keep = c(), doms_remo ## COUNTS of DAs and GCs ## ## Before/after break up ## ########################### -## Function to obtain element counts (DA, GC) -#' Count By Column -#' + +#' countByColumn +#' @description +#' Function to obtain element counts (DA, GC) +#' #' @param prot A data frame containing the dataset to analyze, typically with #' multiple columns including the one specified by the `column` parameter. #' @param column A character string specifying the name of the column to analyze. @@ -111,7 +112,6 @@ filterByDomains <- function(prot, column = "DomArch", doms_keep = c(), doms_remo #' The tibble is filtered to only include elements that have a frequency #' greater than or equal to `min.freq` and does not include elements with `NA` #' values or those starting with a hyphen ("-"). -#' @name summarize #' @export #' #' @examples @@ -131,7 +131,7 @@ countByColumn <- function(prot = prot, column = "DomArch", min.freq = 1) { return(counts) } -#' Elements 2 Words +#' elements2Words #' #' @description #' Break string ELEMENTS into WORDS for domain architecture (DA) and genomic @@ -156,7 +156,6 @@ countByColumn <- function(prot = prot, column = "DomArch", min.freq = 1) { #' @return A single string where elements are delimited by spaces. The function #' performs necessary substitutions based on the `conversion_type` and cleans up #' extraneous characters like newlines, tabs, and multiple spaces. -#' @name summarize #' #' @examples #' \dontrun{ @@ -196,7 +195,7 @@ elements2Words <- function(prot, column = "DomArch", conversion_type = "da2doms" return(z3) } -#' Words 2 Word Counts +#' words2WordCounts #' #' @description #' Get word counts (wc) [DOMAINS (DA) or DOMAIN ARCHITECTURES (GC)] @@ -215,7 +214,6 @@ elements2Words <- function(prot, column = "DomArch", conversion_type = "da2doms" #' \item{`freq`}{A column containing the frequency counts for each word.} #' } #' -#' @name summarize #' #' @examples #' \dontrun{ @@ -252,9 +250,11 @@ words2WordCounts <- function(string) { arrange(-freq) return(df_word_count) } -## Function to filter based on frequencies -#' Filter Frequency -#' + +#' filterByFrequency +#' @description +#' Function to filter based on frequencies +#' #' @param x A tibble (tbl_df) containing at least two columns: one for #' elements (e.g., `words`) and one for their frequency (e.g., `freq`). #' @param min.freq A numeric value specifying the minimum frequency threshold. @@ -263,7 +263,6 @@ words2WordCounts <- function(string) { #' #' @return A tibble with the same structure as `x`, but filtered to include #' only rows where the frequency is greater than or equal to `min.freq`. -#' @name summarize #' #' @export #' @@ -279,7 +278,14 @@ filterByFrequency <- function(x, min.freq) { ######################### ## SUMMARY FUNCTIONS #### ######################### -#' Summarize by Lineage +#' MolEvolvR Summary +#' @name MolEvolvR_summary +#' @description +#' A collection of summary functions for the MolEvolvR package. +#' +NULL + +#' summarizeByLineage #' #' @param prot A dataframe or tibble containing the data. #' @param column A string representing the column to be summarized @@ -295,7 +301,7 @@ filterByFrequency <- function(x, min.freq) { #' @return A tibble summarizing the counts of occurrences of elements in #' the `column`, grouped by the `by` column. The result includes the number #' of occurrences (`count`) and is arranged in descending order of count. 
-#' @name summarize +#' @rdname MolEvolvR_summary #' @export #' #' @examples @@ -341,7 +347,7 @@ summarizeByLineage <- function(prot = "prot", column = "DomArch", by = "Lineage" #' columns: `DomArch`, `Lineage`, and `count`, which indicates the frequency #' of each domain architecture for each lineage. The results are arranged in #' descending order of `count`. -#' @name summarize +#' @rdname MolEvolvR_summary #' #' @export #' @@ -357,7 +363,7 @@ summarizeDomArch_ByLineage <- function(x) { arrange(desc(count)) } -## Function to retrieve counts of how many lineages a DomArch appears in + #' summarizeDomArch #' #' @description @@ -375,7 +381,7 @@ summarizeDomArch_ByLineage <- function(x) { #' - `totallin`: The total number of unique lineages in which each `DomArch` #' appears. #' The results are arranged in descending order of `totallin` and `totalcount`. -#' @name summarize +#' @rdname MolEvolvR_summary #' @export #' #' @examples @@ -407,7 +413,7 @@ summarizeDomArch <- function(x) { #' `GenContext`, `DomArch`, and `Lineage`. #' #' The results are arranged in descending order of `count`. -#' @name summarize +#' @rdname MolEvolvR_summary #' @export #' #' @examples @@ -432,7 +438,7 @@ summarizeGenContext_ByDomArchLineage <- function(x) { #' @importFrom dplyr arrange desc filter group_by n summarise #' #' @return Describe return, in detail -#' @name summarize +#' @rdname MolEvolvR_summary #' @export #' #' @examples @@ -455,7 +461,7 @@ summarizeGenContext_ByLineage <- function(x) { #' @param x A dataframe or tibble containing the data. It must have columns #' named `GenContext`, `DomArch`, and `Lineage`. #' -#' @importFrom dplyr arrange desc filter group_by n summarise +#' @importFrom dplyr arrange desc filter group_by n n_distinct summarise #' #' @return A tibble summarizing each unique combination of `GenContext` and #' `Lineage`, along with the following columns: @@ -465,7 +471,7 @@ summarizeGenContext_ByLineage <- function(x) { #' `GenContext` and `Lineage`. #' #' The results are arranged in descending order of `count`. -#' @name summarize +#' @rdname MolEvolvR_summary #' @export #' #' @examples @@ -487,7 +493,7 @@ summarizeGenContext <- function(x) { ################## -#' Total Counts +#' totalGenContextOrDomArchCounts #' #' @description #' Creates a data frame with a totalcount column @@ -518,7 +524,7 @@ summarizeGenContext <- function(x) { #' - `IndividualCountPercent`: The percentage of each `totalcount` relative to #' the overall count. #' - `CumulativePercent`: The cumulative percentage of total counts. -#' @name summarize +#' @rdname MolEvolvR_summary #' @export #' #' @note Please refer to the source code if you have alternate file formats and/or @@ -670,7 +676,7 @@ totalGenContextOrDomArchCounts <- function(prot, column = "DomArch", lineage_col -#' Find Paralogs +#' findParalogs #' #' @description #' Creates a data frame of paralogs. 
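For reference, a minimal usage sketch of the renamed summary helpers introduced in this patch (the toy tibble mirrors the roxygen examples above and is illustrative only; it assumes the MolEvolvR package with these renamed exports is attached):

```r
library(tibble)

# Toy data in the same shape as the roxygen examples above (DomArch + Lineage)
toy <- tibble(
  DomArch = c("a+b", "a+b", "b+c", "a+b"),
  Lineage = c("l1", "l1", "l1", "l2")
)

# Count domain architectures per lineage (formerly summarize_bylin)
summarizeByLineage(toy, column = "DomArch", by = "Lineage", query = "all")

# Split architectures into domains, then count word frequencies
# (formerly elements2words |> words2wc)
toy |>
  elements2Words(column = "DomArch", conversion_type = "da2doms") |>
  words2WordCounts()
```
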
diff --git a/man/summarize.Rd b/man/MolEvolvR_summary.Rd similarity index 52% rename from man/summarize.Rd rename to man/MolEvolvR_summary.Rd index f149f686..262c4719 100644 --- a/man/summarize.Rd +++ b/man/MolEvolvR_summary.Rd @@ -1,50 +1,29 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/summarize.R -\name{summarize} -\alias{summarize} -\alias{filter_by_doms} -\alias{count_bycol} -\alias{elements2words} -\alias{words2wc} -\alias{filter_freq} -\alias{summarize_bylin} -\alias{summ.DA.byLin} -\alias{summ.DA} -\alias{summ.GC.byDALin} -\alias{summ.GC.byLin} -\alias{summ.GC} -\alias{total_counts} -\title{Filter by Domains} +\name{MolEvolvR_summary} +\alias{MolEvolvR_summary} +\alias{summarizeByLineage} +\alias{summarizeDomArch_ByLineage} +\alias{summarizeDomArch} +\alias{summarizeGenContext_ByDomArchLineage} +\alias{summarizeGenContext_ByLineage} +\alias{summarizeGenContext} +\alias{totalGenContextOrDomArchCounts} +\title{MolEvolvR Summary} \usage{ -filter_by_doms( - prot, - column = "DomArch", - doms_keep = c(), - doms_remove = c(), - ignore.case = FALSE -) - -count_bycol(prot = prot, column = "DomArch", min.freq = 1) - -elements2words(prot, column = "DomArch", conversion_type = "da2doms") - -words2wc(string) +summarizeByLineage(prot = "prot", column = "DomArch", by = "Lineage", query) -filter_freq(x, min.freq) +summarizeDomArch_ByLineage(x) -summarize_bylin(prot = "prot", column = "DomArch", by = "Lineage", query) +summarizeDomArch(x) -summ.DA.byLin(x) +summarizeGenContext_ByDomArchLineage(x) -summ.DA(x) +summarizeGenContext_ByLineage(x) -summ.GC.byDALin(x) +summarizeGenContext(x) -summ.GC.byLin(x) - -summ.GC(x) - -total_counts( +totalGenContextOrDomArchCounts( prot, column = "DomArch", lineage_col = "Lineage", @@ -59,39 +38,15 @@ total_counts( \item{column}{Character. The column to summarize, default is "DomArch".} -\item{doms_keep}{Vector of domains that must be identified within column in order for -observation to be kept} - -\item{doms_remove}{Vector of domains that, if found within an observation, will be removed} - -\item{ignore.case}{Should the matching be non case sensitive} - -\item{min.freq}{A numeric value specifying the minimum frequency threshold. -Only elements with frequencies greater than or equal to this value will be -retained.} - -\item{conversion_type}{A character string specifying the type of conversion. -Two options are available: -\describe{ -\item{\code{da2doms}}{Convert domain architectures into individual domains by -replacing \code{+} symbols with spaces.} -\item{\code{gc2da}}{Convert genomic context into domain architectures by -replacing directional symbols (\verb{<-}, \verb{->}, and \code{|}) with spaces.} -}} - -\item{string}{A character string containing the elements (words) to count. -This would typically be a space-delimited string representing domain -architectures or genomic contexts.} - -\item{x}{A dataframe or tibble containing the data. It must have columns -named \code{GenContext}, \code{DomArch}, and \code{Lineage}.} - \item{by}{A string representing the grouping column (e.g., \code{Lineage}). Default is "Lineage".} \item{query}{A string specifying the query pattern for filtering the target column. Use "all" to skip filtering and include all rows.} +\item{x}{A dataframe or tibble containing the data. It must have columns +named \code{GenContext}, \code{DomArch}, and \code{Lineage}.} + \item{lineage_col}{Character. The name of the lineage column, default is "Lineage".} @@ -105,33 +60,6 @@ cutoff. 
Default is FALSE.} Default is 2.} } \value{ -Filtered data frame - -A tibble with two columns: -\describe{ -\item{\code{column}}{The unique elements from the specified column -(e.g., "DomArch").} -\item{\code{freq}}{The frequency of each element, i.e., the number of times -each element appears in the specified column.} -} -The tibble is filtered to only include elements that have a frequency -greater than or equal to \code{min.freq} and does not include elements with \code{NA} -values or those starting with a hyphen ("-"). - -A single string where elements are delimited by spaces. The function -performs necessary substitutions based on the \code{conversion_type} and cleans up -extraneous characters like newlines, tabs, and multiple spaces. - -A tibble (tbl_df) with two columns: -\describe{ -\item{\code{words}}{A column containing the individual words -(domains or domain architectures).} -\item{\code{freq}}{A column containing the frequency counts for each word.} -} - -A tibble with the same structure as \code{x}, but filtered to include -only rows where the frequency is greater than or equal to \code{min.freq}. - A tibble summarizing the counts of occurrences of elements in the \code{column}, grouped by the \code{by} column. The result includes the number of occurrences (\code{count}) and is arranged in descending order of count. @@ -187,13 +115,7 @@ the overall count. } } \description{ -filter_by_doms filters a data frame by identifying exact domain matches -and either keeping or removing rows with the identified domain - -Break string ELEMENTS into WORDS for domain architecture (DA) and genomic -context (GC) - -Get word counts (wc) \link{DOMAINS (DA) or DOMAIN ARCHITECTURES (GC)} +A collection of summary functions for the MolEvolvR package. Function to summarize and retrieve counts by Domains & Domains+Lineage @@ -204,57 +126,32 @@ Creates a data frame with a totalcount column This function is designed to sum the counts column by either Genomic Context or Domain Architecture and creates a totalcount column from those sums. } \note{ -There is no need to make the domains 'regex safe', that will be handled by this function - Please refer to the source code if you have alternate file formats and/or column names. 
} \examples{ \dontrun{ -filter_by_doms() -} -\dontrun{ -count_bycol(prot = my_data, column = "DomArch", min.freq = 10) -} -\dontrun{ -tibble::tibble(DomArch = c("aaa+bbb", -"a+b", "b+c", "b-c")) |> elements2words() -} - -\dontrun{ -tibble::tibble(DomArch = c("aaa+bbb", "a+b", "b+c", "b-c")) |> - elements2words() |> - words2wc() -} - -\dontrun{ -filter_freq() -} -\dontrun{ library(tidyverse) tibble(DomArch = c("a+b", "a+b", "b+c", "a+b"), Lineage = c("l1", "l1", "l1", "l2")) |> - summarize_bylin(query = "all") + summarizeByLineage(query = "all") } \dontrun{ -summ.DA.byLin() +summarizeDomArch_ByLineage() } \dontrun{ -summ.DA() +summarizeDomArch() } \dontrun{ -summ.GC.byDALin +summarizeGenContext_ByDomArchLineage } \dontrun{ -summ.GC.byLin() +summarizeGenContext_ByLineage() } \dontrun{ -summ.GC() +summarizeGenContext() } \dontrun{ -total_counts(pspa - gc_lin_counts, 0, "GC") -} +totalGenContextOrDomArchCounts(pspa - gc_lin_counts, 0, "GC") } -\author{ -Samuel Chen, Janani Ravi } diff --git a/man/countbycolumn.Rd b/man/countbycolumn.Rd index 34fcc3e0..57ff9ac4 100644 --- a/man/countbycolumn.Rd +++ b/man/countbycolumn.Rd @@ -2,21 +2,37 @@ % Please edit documentation in R/summarize.R \name{countByColumn} \alias{countByColumn} -\title{Count By Column} +\title{countByColumn} \usage{ countByColumn(prot = prot, column = "DomArch", min.freq = 1) } \arguments{ -\item{min.freq}{} +\item{prot}{A data frame containing the dataset to analyze, typically with +multiple columns including the one specified by the \code{column} parameter.} + +\item{column}{A character string specifying the name of the column to analyze. +The default is "DomArch".} + +\item{min.freq}{An integer specifying the minimum frequency an element must +have to be included in the output. Default is 1.} } \value{ -Describe return, in detail +A tibble with two columns: +\describe{ +\item{\code{column}}{The unique elements from the specified column +(e.g., "DomArch").} +\item{\code{freq}}{The frequency of each element, i.e., the number of times +each element appears in the specified column.} +} +The tibble is filtered to only include elements that have a frequency +greater than or equal to \code{min.freq} and does not include elements with \code{NA} +values or those starting with a hyphen ("-"). } \description{ -Count By Column +Function to obtain element counts (DA, GC) } \examples{ \dontrun{ -countByColumn() +countByColumn(prot = my_data, column = "DomArch", min.freq = 10) } } diff --git a/man/elements2Words.Rd b/man/elements2Words.Rd index 1094d363..bfd3071b 100644 --- a/man/elements2Words.Rd +++ b/man/elements2Words.Rd @@ -2,20 +2,30 @@ % Please edit documentation in R/summarize.R \name{elements2Words} \alias{elements2Words} -\title{Elements 2 Words} +\title{elements2Words} \usage{ elements2Words(prot, column = "DomArch", conversion_type = "da2doms") } \arguments{ -\item{prot}{\link{dataframe}} +\item{prot}{A dataframe containing the dataset to analyze. The specified +\code{column} contains the string elements to be processed.} -\item{column}{\link{string} column name} +\item{column}{A character string specifying the name of the column to analyze. +Default is "DomArch".} -\item{conversion_type}{\link{string} type of conversion: 'da2doms': domain architectures to -domains. 'gc2da' genomic context to domain architectures} +\item{conversion_type}{A character string specifying the type of conversion. 
+Two options are available: +\describe{ +\item{\code{da2doms}}{Convert domain architectures into individual domains by +replacing \code{+} symbols with spaces.} +\item{\code{gc2da}}{Convert genomic context into domain architectures by +replacing directional symbols (\verb{<-}, \verb{->}, and \code{|}) with spaces.} +}} } \value{ -\link{string} with words delimited by spaces +A single string where elements are delimited by spaces. The function +performs necessary substitutions based on the \code{conversion_type} and cleans up +extraneous characters like newlines, tabs, and multiple spaces. } \description{ Break string ELEMENTS into WORDS for domain architecture (DA) and genomic @@ -23,7 +33,8 @@ context (GC) } \examples{ \dontrun{ -tibble::tibble(DomArch = c("aaa+bbb", "a+b", "b+c", "b-c")) |> elements2Words() +tibble::tibble(DomArch = c("aaa+bbb", +"a+b", "b+c", "b-c")) |> elements2Words() } } diff --git a/man/filterbydomains.Rd b/man/filterbydomains.Rd index 8c885363..afb3e5cb 100644 --- a/man/filterbydomains.Rd +++ b/man/filterbydomains.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/summarize.R \name{filterByDomains} \alias{filterByDomains} -\title{Filter by Domains} +\title{filterByDomains} \usage{ filterByDomains( prot, diff --git a/man/filterbyfrequency.Rd b/man/filterbyfrequency.Rd index d2c5f9cd..15d06d67 100644 --- a/man/filterbyfrequency.Rd +++ b/man/filterbyfrequency.Rd @@ -2,18 +2,24 @@ % Please edit documentation in R/summarize.R \name{filterByFrequency} \alias{filterByFrequency} -\title{Filter Frequency} +\title{filterByFrequency} \usage{ filterByFrequency(x, min.freq) } \arguments{ -\item{min.freq}{} +\item{x}{A tibble (tbl_df) containing at least two columns: one for +elements (e.g., \code{words}) and one for their frequency (e.g., \code{freq}).} + +\item{min.freq}{A numeric value specifying the minimum frequency threshold. +Only elements with frequencies greater than or equal to this value will be +retained.} } \value{ -Describe return, in detail +A tibble with the same structure as \code{x}, but filtered to include +only rows where the frequency is greater than or equal to \code{min.freq}. 
} \description{ -Filter Frequency +Function to filter based on frequencies } \examples{ \dontrun{ diff --git a/man/findparalogs.Rd b/man/findparalogs.Rd index 4b5edbcf..d92edf71 100644 --- a/man/findparalogs.Rd +++ b/man/findparalogs.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/summarize.R \name{findParalogs} \alias{findParalogs} -\title{Find Paralogs} +\title{findParalogs} \usage{ findParalogs(prot) } diff --git a/man/summarizeDomArch.Rd b/man/summarizeDomArch.Rd deleted file mode 100644 index 11db1afa..00000000 --- a/man/summarizeDomArch.Rd +++ /dev/null @@ -1,22 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/summarize.R -\name{summarizeDomArch} -\alias{summarizeDomArch} -\title{summarizeDomArch} -\usage{ -summarizeDomArch(x) -} -\arguments{ -\item{x}{} -} -\value{ -Describe return, in detail -} -\description{ -Function to retrieve counts of how many lineages a DomArch appears in -} -\examples{ -\dontrun{ -summarizeDomArch() -} -} diff --git a/man/summarizeDomArch_ByLineage.Rd b/man/summarizeDomArch_ByLineage.Rd deleted file mode 100644 index cf5fac22..00000000 --- a/man/summarizeDomArch_ByLineage.Rd +++ /dev/null @@ -1,22 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/summarize.R -\name{summarizeDomArch_ByLineage} -\alias{summarizeDomArch_ByLineage} -\title{summarizeDomArch_ByLineage} -\usage{ -summarizeDomArch_ByLineage(x) -} -\arguments{ -\item{x}{} -} -\value{ -Describe return, in detail -} -\description{ -Function to summarize and retrieve counts by Domains & Domains+Lineage -} -\examples{ -\dontrun{ -summarizeDomArch_ByLineage() -} -} diff --git a/man/summarizeGenContext.Rd b/man/summarizeGenContext.Rd deleted file mode 100644 index 5a40811b..00000000 --- a/man/summarizeGenContext.Rd +++ /dev/null @@ -1,22 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/summarize.R -\name{summarizeGenContext} -\alias{summarizeGenContext} -\title{summarizeGenContext} -\usage{ -summarizeGenContext(x) -} -\arguments{ -\item{x}{} -} -\value{ -Describe return, in detail -} -\description{ -summarizeGenContext -} -\examples{ -\dontrun{ -summarizeGenContext() -} -} diff --git a/man/summarizeGenContext_ByDomArchLineage.Rd b/man/summarizeGenContext_ByDomArchLineage.Rd deleted file mode 100644 index 59e0376e..00000000 --- a/man/summarizeGenContext_ByDomArchLineage.Rd +++ /dev/null @@ -1,22 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/summarize.R -\name{summarizeGenContext_ByDomArchLineage} -\alias{summarizeGenContext_ByDomArchLineage} -\title{summarizeGenContext_ByDomArchLineage} -\usage{ -summarizeGenContext_ByDomArchLineage(x) -} -\arguments{ -\item{x}{} -} -\value{ -Define return, in detail -} -\description{ -summarizeGenContext_ByDomArchLineage -} -\examples{ -\dontrun{ -summarizeGenContext_ByDomArchLineage -} -} diff --git a/man/summarizeGenContext_ByLineage.Rd b/man/summarizeGenContext_ByLineage.Rd deleted file mode 100644 index 932fe6a7..00000000 --- a/man/summarizeGenContext_ByLineage.Rd +++ /dev/null @@ -1,22 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/summarize.R -\name{summarizeGenContext_ByLineage} -\alias{summarizeGenContext_ByLineage} -\title{summarizeGenContext_ByLineage} -\usage{ -summarizeGenContext_ByLineage(x) -} -\arguments{ -\item{x}{} -} -\value{ -Describe return, in detail -} -\description{ -summarizeGenContext_ByLineage -} -\examples{ -\dontrun{ 
-summarizeGenContext_ByLineage() -} -} diff --git a/man/summarizebylineage.Rd b/man/summarizebylineage.Rd deleted file mode 100644 index 2e445913..00000000 --- a/man/summarizebylineage.Rd +++ /dev/null @@ -1,25 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/summarize.R -\name{summarizeByLineage} -\alias{summarizeByLineage} -\title{Summarize by Lineage} -\usage{ -summarizeByLineage(prot = "prot", column = "DomArch", by = "Lineage", query) -} -\arguments{ -\item{query}{} -} -\value{ -Describe return, in detail -} -\description{ -Summarize by Lineage -} -\examples{ -\dontrun{ -library(tidyverse) -tibble(DomArch = c("a+b", "a+b", "b+c", "a+b"), Lineage = c("l1", "l1", "l1", "l2")) |> - summarizeByLineage(query = "all") -} - -} diff --git a/man/totalgencontextordomarchcounts.Rd b/man/totalgencontextordomarchcounts.Rd deleted file mode 100644 index f457cb6a..00000000 --- a/man/totalgencontextordomarchcounts.Rd +++ /dev/null @@ -1,42 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/summarize.R -\name{totalGenContextOrDomArchCounts} -\alias{totalGenContextOrDomArchCounts} -\title{Total Counts} -\usage{ -totalGenContextOrDomArchCounts( - prot, - column = "DomArch", - lineage_col = "Lineage", - cutoff = 90, - RowsCutoff = FALSE, - digits = 2 -) -} -\arguments{ -\item{prot}{A data frame that must contain columns: -\itemize{\item Either 'GenContext' or 'DomArch.norep' \item count}} - -\item{column}{Character. The column to summarize} - -\item{cutoff}{Numeric. Cutoff for total count. Counts below cutoff value will not be shown. Default is 0.} - -\item{digits}{} -} -\value{ -Define return, in detail -} -\description{ -Creates a data frame with a totalcount column - -This function is designed to sum the counts column by either Genomic Context or Domain Architecture and creates a totalcount column from those sums. -} -\note{ -Please refer to the source code if you have alternate file formats and/or -column names. -} -\examples{ -\dontrun{ -totalGenContextOrDomArchCounts(pspa - gc_lin_counts, 0, "GC") -} -} diff --git a/man/words2wordcounts.Rd b/man/words2wordcounts.Rd index 7f60f226..370dec7f 100644 --- a/man/words2wordcounts.Rd +++ b/man/words2wordcounts.Rd @@ -2,15 +2,22 @@ % Please edit documentation in R/summarize.R \name{words2WordCounts} \alias{words2WordCounts} -\title{Words 2 Word Counts} +\title{words2WordCounts} \usage{ words2WordCounts(string) } \arguments{ -\item{string}{} +\item{string}{A character string containing the elements (words) to count. 
+This would typically be a space-delimited string representing domain +architectures or genomic contexts.} } \value{ -\link{tbl_df} table with 2 columns: 1) words & 2) counts/frequency +A tibble (tbl_df) with two columns: +\describe{ +\item{\code{words}}{A column containing the individual words +(domains or domain architectures).} +\item{\code{freq}}{A column containing the frequency counts for each word.} +} } \description{ Get word counts (wc) \link{DOMAINS (DA) or DOMAIN ARCHITECTURES (GC)} From 11b22113b52087c6a72e7df4b845d8f0323c367b Mon Sep 17 00:00:00 2001 From: David Mayer Date: Fri, 11 Oct 2024 09:02:19 -0600 Subject: [PATCH 26/41] minor phrasing adjustment --- .github/CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index 9fcd6b7f..f9f8de97 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -32,7 +32,7 @@ See our guide on [how to create a great issue](https://code-review.tidyverse.org ``` usethis::create_from_github("JRaviLab/MolEvolvR", fork = TRUE) ``` -- Install Bioconductor dependencies: +- Install BiocManager from Bioconductor: ``` if (!require("BiocManager", quietly = TRUE)) From 851d8796c9d9f4d895fd92f5eacb8f1eab45eda9 Mon Sep 17 00:00:00 2001 From: David Mayer Date: Fri, 11 Oct 2024 09:02:36 -0600 Subject: [PATCH 27/41] skip sending quarto files to Git --- .github/.gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/.gitignore b/.github/.gitignore index 2d19fc76..5c86aa40 100644 --- a/.github/.gitignore +++ b/.github/.gitignore @@ -1 +1,3 @@ *.html + +/.quarto/ From 2d00b6fa42b124acf8cd3cd63e60cec745d71a10 Mon Sep 17 00:00:00 2001 From: David Mayer Date: Fri, 11 Oct 2024 13:46:03 -0600 Subject: [PATCH 28/41] modify .Rd names --- R/ipr2viz.R | 9 ++++---- man/countByColumn.Rd | 38 +++++++++++++++++++++++++++++++ man/filterByDomains.Rd | 44 ++++++++++++++++++++++++++++++++++++ man/filterByFrequency.Rd | 28 +++++++++++++++++++++++ man/findParalogs.Rd | 26 +++++++++++++++++++++ man/getTopAccByLinDomArch.Rd | 2 +- man/plotIPR2Viz.Rd | 4 ++-- man/plotIPR2VizWeb.Rd | 4 ++-- man/themeGenes2.Rd | 4 ++-- man/words2WordCounts.Rd | 32 ++++++++++++++++++++++++++ 10 files changed, 180 insertions(+), 11 deletions(-) create mode 100644 man/countByColumn.Rd create mode 100644 man/filterByDomains.Rd create mode 100644 man/filterByFrequency.Rd create mode 100644 man/findParalogs.Rd create mode 100644 man/words2WordCounts.Rd diff --git a/R/ipr2viz.R b/R/ipr2viz.R index dff6e67a..9b625d4e 100644 --- a/R/ipr2viz.R +++ b/R/ipr2viz.R @@ -15,7 +15,7 @@ ################################# ## themeGenes2 adapted from theme_genes (w/o strip.text()) ## https://github.com/wilkox/gggenes/blob/master/R/theme_genes.R -#' Theme Genes2 +#' themeGenes2 #' #' @importFrom ggplot2 element_blank element_line theme theme_grey #' @@ -41,7 +41,8 @@ themeGenes2 <- function() { ################################## ## Get Top N AccNum by Lin+DomArch ################################## -#' Group by lineage + DA then take top 20 +#' getTopAccByLinDomArch +#' @description Group by lineage + DA then take top 20 #' #' @param infile_full #' @param DA_col @@ -92,7 +93,7 @@ getTopAccByLinDomArch <- function(infile_full, ############################################# ## IPR + FULL files --> DomArch Visualization ############################################# -#' IPR2Viz +#' plotIPR2Viz #' #' @param infile_ipr #' @param infile_full @@ -248,7 +249,7 @@ plotIPR2Viz <- function(infile_ipr = NULL, infile_full = NULL, 
accessions = c(), return(plot) } -#' IPR2Viz Web +#' plotIPR2VizWeb #' #' @param infile_ipr #' @param accessions diff --git a/man/countByColumn.Rd b/man/countByColumn.Rd new file mode 100644 index 00000000..57ff9ac4 --- /dev/null +++ b/man/countByColumn.Rd @@ -0,0 +1,38 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/summarize.R +\name{countByColumn} +\alias{countByColumn} +\title{countByColumn} +\usage{ +countByColumn(prot = prot, column = "DomArch", min.freq = 1) +} +\arguments{ +\item{prot}{A data frame containing the dataset to analyze, typically with +multiple columns including the one specified by the \code{column} parameter.} + +\item{column}{A character string specifying the name of the column to analyze. +The default is "DomArch".} + +\item{min.freq}{An integer specifying the minimum frequency an element must +have to be included in the output. Default is 1.} +} +\value{ +A tibble with two columns: +\describe{ +\item{\code{column}}{The unique elements from the specified column +(e.g., "DomArch").} +\item{\code{freq}}{The frequency of each element, i.e., the number of times +each element appears in the specified column.} +} +The tibble is filtered to only include elements that have a frequency +greater than or equal to \code{min.freq} and does not include elements with \code{NA} +values or those starting with a hyphen ("-"). +} +\description{ +Function to obtain element counts (DA, GC) +} +\examples{ +\dontrun{ +countByColumn(prot = my_data, column = "DomArch", min.freq = 10) +} +} diff --git a/man/filterByDomains.Rd b/man/filterByDomains.Rd new file mode 100644 index 00000000..afb3e5cb --- /dev/null +++ b/man/filterByDomains.Rd @@ -0,0 +1,44 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/summarize.R +\name{filterByDomains} +\alias{filterByDomains} +\title{filterByDomains} +\usage{ +filterByDomains( + prot, + column = "DomArch", + doms_keep = c(), + doms_remove = c(), + ignore.case = FALSE +) +} +\arguments{ +\item{prot}{Dataframe to filter} + +\item{column}{Column to search for domains in (DomArch column)} + +\item{doms_keep}{Vector of domains that must be identified within column in order for +observation to be kept} + +\item{doms_remove}{Vector of domains that, if found within an observation, will be removed} + +\item{ignore.case}{Should the matching be non case sensitive} +} +\value{ +Filtered data frame +} +\description{ +filterByDomains filters a data frame by identifying exact domain matches +and either keeping or removing rows with the identified domain +} +\note{ +There is no need to make the domains 'regex safe', that will be handled by this function +} +\examples{ +\dontrun{ +filterByDomains() +} +} +\author{ +Samuel Chen, Janani Ravi +} diff --git a/man/filterByFrequency.Rd b/man/filterByFrequency.Rd new file mode 100644 index 00000000..15d06d67 --- /dev/null +++ b/man/filterByFrequency.Rd @@ -0,0 +1,28 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/summarize.R +\name{filterByFrequency} +\alias{filterByFrequency} +\title{filterByFrequency} +\usage{ +filterByFrequency(x, min.freq) +} +\arguments{ +\item{x}{A tibble (tbl_df) containing at least two columns: one for +elements (e.g., \code{words}) and one for their frequency (e.g., \code{freq}).} + +\item{min.freq}{A numeric value specifying the minimum frequency threshold. 
+Only elements with frequencies greater than or equal to this value will be +retained.} +} +\value{ +A tibble with the same structure as \code{x}, but filtered to include +only rows where the frequency is greater than or equal to \code{min.freq}. +} +\description{ +Function to filter based on frequencies +} +\examples{ +\dontrun{ +filterByFrequency() +} +} diff --git a/man/findParalogs.Rd b/man/findParalogs.Rd new file mode 100644 index 00000000..d92edf71 --- /dev/null +++ b/man/findParalogs.Rd @@ -0,0 +1,26 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/summarize.R +\name{findParalogs} +\alias{findParalogs} +\title{findParalogs} +\usage{ +findParalogs(prot) +} +\arguments{ +\item{prot}{A data frame filtered by a Query, containing columns Species and Lineage} +} +\value{ +returns a dataframe containing paralogs and the counts. +} +\description{ +Creates a data frame of paralogs. +} +\note{ +Please refer to the source code if you have alternate file formats and/or +column names. +} +\examples{ +\dontrun{ +findParalogs(pspa) +} +} diff --git a/man/getTopAccByLinDomArch.Rd b/man/getTopAccByLinDomArch.Rd index a00da5c7..b8571350 100644 --- a/man/getTopAccByLinDomArch.Rd +++ b/man/getTopAccByLinDomArch.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/ipr2viz.R \name{getTopAccByLinDomArch} \alias{getTopAccByLinDomArch} -\title{Group by lineage + DA then take top 20} +\title{getTopAccByLinDomArch} \usage{ getTopAccByLinDomArch( infile_full, diff --git a/man/plotIPR2Viz.Rd b/man/plotIPR2Viz.Rd index 22297312..7ed420c9 100644 --- a/man/plotIPR2Viz.Rd +++ b/man/plotIPR2Viz.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/ipr2viz.R \name{plotIPR2Viz} \alias{plotIPR2Viz} -\title{IPR2Viz} +\title{plotIPR2Viz} \usage{ plotIPR2Viz( infile_ipr = NULL, @@ -20,5 +20,5 @@ plotIPR2Viz( \item{query}{} } \description{ -IPR2Viz +plotIPR2Viz } diff --git a/man/plotIPR2VizWeb.Rd b/man/plotIPR2VizWeb.Rd index 4b4394ad..3b94a5a7 100644 --- a/man/plotIPR2VizWeb.Rd +++ b/man/plotIPR2VizWeb.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/ipr2viz.R \name{plotIPR2VizWeb} \alias{plotIPR2VizWeb} -\title{IPR2Viz Web} +\title{plotIPR2VizWeb} \usage{ plotIPR2VizWeb( infile_ipr, @@ -20,5 +20,5 @@ plotIPR2VizWeb( \item{rows}{} } \description{ -IPR2Viz Web +plotIPR2VizWeb } diff --git a/man/themeGenes2.Rd b/man/themeGenes2.Rd index 1553e019..64ae9273 100644 --- a/man/themeGenes2.Rd +++ b/man/themeGenes2.Rd @@ -2,10 +2,10 @@ % Please edit documentation in R/ipr2viz.R \name{themeGenes2} \alias{themeGenes2} -\title{Theme Genes2} +\title{themeGenes2} \usage{ themeGenes2() } \description{ -Theme Genes2 +themeGenes2 } diff --git a/man/words2WordCounts.Rd b/man/words2WordCounts.Rd new file mode 100644 index 00000000..370dec7f --- /dev/null +++ b/man/words2WordCounts.Rd @@ -0,0 +1,32 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/summarize.R +\name{words2WordCounts} +\alias{words2WordCounts} +\title{words2WordCounts} +\usage{ +words2WordCounts(string) +} +\arguments{ +\item{string}{A character string containing the elements (words) to count. 
+This would typically be a space-delimited string representing domain +architectures or genomic contexts.} +} +\value{ +A tibble (tbl_df) with two columns: +\describe{ +\item{\code{words}}{A column containing the individual words +(domains or domain architectures).} +\item{\code{freq}}{A column containing the frequency counts for each word.} +} +} +\description{ +Get word counts (wc) \link{DOMAINS (DA) or DOMAIN ARCHITECTURES (GC)} +} +\examples{ +\dontrun{ +tibble::tibble(DomArch = c("aaa+bbb", "a+b", "b+c", "b-c")) |> + elements2Words() |> + words2WordCounts() +} + +} From 56b39da61292ae0facc31c104e90927f2483413e Mon Sep 17 00:00:00 2001 From: David Mayer Date: Fri, 11 Oct 2024 13:54:22 -0600 Subject: [PATCH 29/41] let R manage NAMESPACE sort order --- NAMESPACE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/NAMESPACE b/NAMESPACE index 08f3aa92..dc5c95a4 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -52,8 +52,8 @@ export(gc_undirected_network) export(generateAllAlignments2FA) export(generate_all_aln2fa) export(generate_msa) -export(getTopAccByLinDomArch) export(getAccNumFromFA) +export(getTopAccByLinDomArch) export(get_accnums_from_fasta_file) export(get_proc_medians) export(get_proc_weights) From a74fb69a54f6a6ca39005f0b4d8cbf4dc15ee91c Mon Sep 17 00:00:00 2001 From: David Mayer Date: Fri, 11 Oct 2024 18:41:02 -0600 Subject: [PATCH 30/41] maintain function name consistency with CHANGED-pre-msa-tree.R and pre-msa-tree.R while we determine where these functions should live. --- NAMESPACE | 1 - R/CHANGED-pre-msa-tree.R | 6 +++--- man/write.MsaAAMultipleAlignment.Rd | 20 -------------------- man/writeMSA_AA2FA.Rd | 7 ++++++- 4 files changed, 9 insertions(+), 25 deletions(-) delete mode 100644 man/write.MsaAAMultipleAlignment.Rd diff --git a/NAMESPACE b/NAMESPACE index dc5c95a4..7271b65f 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -104,7 +104,6 @@ export(to_titlecase) export(totalGenContextOrDomArchCounts) export(validateCountDF) export(wordcloud3) -export(write.MsaAAMultipleAlignment) export(writeMSA_AA2FA) export(write_proc_medians_table) export(write_proc_medians_yml) diff --git a/R/CHANGED-pre-msa-tree.R b/R/CHANGED-pre-msa-tree.R index c4a97589..a755df8c 100644 --- a/R/CHANGED-pre-msa-tree.R +++ b/R/CHANGED-pre-msa-tree.R @@ -610,12 +610,12 @@ alignFasta <- function(fasta_file, tool = "Muscle", outpath = NULL) { ) if (typeof(outpath) == "character") { - write.MsaAAMultipleAlignment(aligned, outpath) + writeMSA_AA2FA(aligned, outpath) } return(aligned) } -#' Write MsaAAMultpleAlignment Objects as algined fasta sequence +#' writeMSA_AA2FA #' #' @description #' MsaAAMultipleAlignment Objects are generated from calls to msaClustalOmega @@ -632,7 +632,7 @@ alignFasta <- function(fasta_file, tool = "Muscle", outpath = NULL) { #' @export #' #' @examples -write.MsaAAMultipleAlignment <- function(alignment, outpath) { +writeMSA_AA2FA <- function(alignment, outpath) { l <- length(rownames(alignment)) fasta <- "" for (i in 1:l) diff --git a/man/write.MsaAAMultipleAlignment.Rd b/man/write.MsaAAMultipleAlignment.Rd deleted file mode 100644 index e26f26e7..00000000 --- a/man/write.MsaAAMultipleAlignment.Rd +++ /dev/null @@ -1,20 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/CHANGED-pre-msa-tree.R -\name{write.MsaAAMultipleAlignment} -\alias{write.MsaAAMultipleAlignment} -\title{Write MsaAAMultpleAlignment Objects as algined fasta sequence} -\usage{ -write.MsaAAMultipleAlignment(alignment, outpath) -} -\arguments{ 
-\item{alignment}{MsaAAMultipleAlignment object to be written as a fasta} - -\item{outpath}{Where the resulting FASTA file should be written to} -} -\description{ -MsaAAMultipleAlignment Objects are generated from calls to msaClustalOmega -and msaMuscle from the 'msa' package -} -\author{ -Samuel Chen, Janani Ravi -} diff --git a/man/writeMSA_AA2FA.Rd b/man/writeMSA_AA2FA.Rd index 068e5b63..a6798469 100644 --- a/man/writeMSA_AA2FA.Rd +++ b/man/writeMSA_AA2FA.Rd @@ -1,9 +1,11 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/pre-msa-tree.R +% Please edit documentation in R/CHANGED-pre-msa-tree.R, R/pre-msa-tree.R \name{writeMSA_AA2FA} \alias{writeMSA_AA2FA} \title{writeMSA_AA2FA} \usage{ +writeMSA_AA2FA(alignment, outpath) + writeMSA_AA2FA(alignment, outpath) } \arguments{ @@ -12,6 +14,9 @@ writeMSA_AA2FA(alignment, outpath) \item{outpath}{Where the resulting FASTA file should be written to} } \description{ +MsaAAMultipleAlignment Objects are generated from calls to msaClustalOmega +and msaMuscle from the 'msa' package + Write MsaAAMultpleAlignment Objects as aligned fasta sequence MsaAAMultipleAlignment Objects are generated from calls to msaClustalOmega and msaMuscle from the 'msa' package From 5fcd985a88ab270245a554a44adb557fa02acaed Mon Sep 17 00:00:00 2001 From: David Mayer Date: Fri, 11 Oct 2024 18:42:56 -0600 Subject: [PATCH 31/41] maintain function name consistency across .R files while other determinations are made - getAccNumFromFA() --- NAMESPACE | 1 - R/CHANGED-pre-msa-tree.R | 4 ++-- man/getAccNumFromFA.Rd | 6 +++++- man/get_accnums_from_fasta_file.Rd | 14 -------------- 4 files changed, 7 insertions(+), 18 deletions(-) delete mode 100644 man/get_accnums_from_fasta_file.Rd diff --git a/NAMESPACE b/NAMESPACE index 7271b65f..23b29248 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -54,7 +54,6 @@ export(generate_all_aln2fa) export(generate_msa) export(getAccNumFromFA) export(getTopAccByLinDomArch) -export(get_accnums_from_fasta_file) export(get_proc_medians) export(get_proc_weights) export(make_opts2procs) diff --git a/R/CHANGED-pre-msa-tree.R b/R/CHANGED-pre-msa-tree.R index a755df8c..767d51aa 100644 --- a/R/CHANGED-pre-msa-tree.R +++ b/R/CHANGED-pre-msa-tree.R @@ -645,7 +645,7 @@ writeMSA_AA2FA <- function(alignment, outpath) { return(fasta) } -#' Get accnums from fasta file +#' getAccNumFromFA #' #' @param fasta_file #' @@ -655,7 +655,7 @@ writeMSA_AA2FA <- function(alignment, outpath) { #' @export #' #' @examples -get_accnums_from_fasta_file <- function(fasta_file) { +getAccNumFromFA <- function(fasta_file) { txt <- read_file(fasta_file) accnums <- stringi::stri_extract_all_regex(fasta_file, "(?<=>)[\\w,.]+")[[1]] return(accnums) diff --git a/man/getAccNumFromFA.Rd b/man/getAccNumFromFA.Rd index f2409965..d3ab8177 100644 --- a/man/getAccNumFromFA.Rd +++ b/man/getAccNumFromFA.Rd @@ -1,14 +1,18 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/pre-msa-tree.R +% Please edit documentation in R/CHANGED-pre-msa-tree.R, R/pre-msa-tree.R \name{getAccNumFromFA} \alias{getAccNumFromFA} \title{getAccNumFromFA} \usage{ +getAccNumFromFA(fasta_file) + getAccNumFromFA(fasta_file) } \arguments{ \item{fasta_file}{} } \description{ +getAccNumFromFA + getAccNumFromFA } diff --git a/man/get_accnums_from_fasta_file.Rd b/man/get_accnums_from_fasta_file.Rd deleted file mode 100644 index f545d1a0..00000000 --- a/man/get_accnums_from_fasta_file.Rd +++ /dev/null @@ -1,14 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit 
documentation in R/CHANGED-pre-msa-tree.R -\name{get_accnums_from_fasta_file} -\alias{get_accnums_from_fasta_file} -\title{Get accnums from fasta file} -\usage{ -get_accnums_from_fasta_file(fasta_file) -} -\arguments{ -\item{fasta_file}{} -} -\description{ -Get accnums from fasta file -} From d544f7ef932be8b44f04d1fae85bf715d976260b Mon Sep 17 00:00:00 2001 From: David Mayer Date: Fri, 11 Oct 2024 18:54:49 -0600 Subject: [PATCH 32/41] additional cross .R file consistency while other function placement decisions are made --- NAMESPACE | 7 ---- R/CHANGED-pre-msa-tree.R | 25 +++++++++++---- R/pre-msa-tree.R | 36 +++++++++++++-------- man/RepresentativeAccNums.Rd | 23 -------------- man/acc2fa.Rd | 3 ++ man/addLeaves2Alignment.Rd | 25 +++++++++++++-- man/addName.Rd | 18 +++++++++-- man/add_leaves.Rd | 50 ----------------------------- man/add_name.Rd | 39 ----------------------- man/alignFasta.Rd | 4 ++- man/convert2TitleCase.Rd | 9 +++++- man/convertAlignment2FA.Rd | 21 ++++++++++-- man/convert_aln2fa.Rd | 53 ------------------------------- man/createRepresentativeAccNum.Rd | 10 +++++- man/generateAllAlignments2FA.Rd | 35 ++++++++++++++++---- man/generate_all_aln2fa.Rd | 48 ---------------------------- man/mapAcc2Name.Rd | 10 ++++-- man/map_acc2name.Rd | 21 ------------ man/to_titlecase.Rd | 25 --------------- 19 files changed, 158 insertions(+), 304 deletions(-) delete mode 100644 man/RepresentativeAccNums.Rd delete mode 100644 man/add_leaves.Rd delete mode 100644 man/add_name.Rd delete mode 100644 man/convert_aln2fa.Rd delete mode 100644 man/generate_all_aln2fa.Rd delete mode 100644 man/map_acc2name.Rd delete mode 100644 man/to_titlecase.Rd diff --git a/NAMESPACE b/NAMESPACE index 23b29248..fe4c23d6 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -4,7 +4,6 @@ export(BinaryDomainNetwork) export(GCA2Lineage) export(GenContextNetwork) export(IPG2Lineage) -export(RepresentativeAccNums) export(acc2FA) export(acc2Lineage) export(acc2fa) @@ -12,8 +11,6 @@ export(addLeaves2Alignment) export(addLineage) export(addName) export(addTaxID) -export(add_leaves) -export(add_name) export(advanced_opts2est_walltime) export(alignFasta) export(assign_job_queue) @@ -31,7 +28,6 @@ export(convert2TitleCase) export(convertAlignment2FA) export(convertAlignment2Trees) export(convertFA2Tree) -export(convert_aln2fa) export(countByColumn) export(createFA2Tree) export(createJobResultsURL) @@ -50,7 +46,6 @@ export(findParalogs) export(formatJobArgumentsHTML) export(gc_undirected_network) export(generateAllAlignments2FA) -export(generate_all_aln2fa) export(generate_msa) export(getAccNumFromFA) export(getTopAccByLinDomArch) @@ -58,7 +53,6 @@ export(get_proc_medians) export(get_proc_weights) export(make_opts2procs) export(mapAcc2Name) -export(map_acc2name) export(map_advanced_opts2procs) export(msa_pdf) export(plotIPR2Viz) @@ -99,7 +93,6 @@ export(summarizeGenContext) export(summarizeGenContext_ByDomArchLineage) export(summarizeGenContext_ByLineage) export(themeGenes2) -export(to_titlecase) export(totalGenContextOrDomArchCounts) export(validateCountDF) export(wordcloud3) diff --git a/R/CHANGED-pre-msa-tree.R b/R/CHANGED-pre-msa-tree.R index 767d51aa..2f6c8a62 100644 --- a/R/CHANGED-pre-msa-tree.R +++ b/R/CHANGED-pre-msa-tree.R @@ -54,7 +54,7 @@ convert2TitleCase <- function(x, y = " ") { ################################ ## Function to add leaves to an alignment file ## !! Add DA to leaves? 
-#' Adding Leaves to an alignment file w/ accessions +#' addLeaves2Alignment #' #' @author Janani Ravi #' @keywords alignment, accnum, leaves, lineage, species @@ -178,7 +178,7 @@ addLeaves2Alignment <- function(aln_file = "", } -#' Add Name +#' addName #' #' @author Samuel Chen, Janani Ravi #' @description This function adds a new 'Name' column that is comprised of components from @@ -252,7 +252,7 @@ addName <- function(data, ################################ ## Function to convert alignment 'aln' to fasta format for MSA + Tree -#' Adding Leaves to an alignment file w/ accessions +#' convertAlignment2FA #' #' @author Janani Ravi #' @keywords alignment, accnum, leaves, lineage, species @@ -320,6 +320,9 @@ convertAlignment2FA <- function(aln_file = "", return(fasta) } +#' mapAcc2Name +#' +#' @description #' Default renameFA() replacement function. Maps an accession number to its name #' #' @param line The line of a fasta file starting with '>' @@ -382,6 +385,9 @@ renameFA <- function(fa_path, outpath, ################################ ## generateAllAlignments2FA +#' generateAllAlignments2FA +#' +#' @description #' Adding Leaves to an alignment file w/ accessions #' #' @keywords alignment, accnum, leaves, lineage, species @@ -441,10 +447,11 @@ generateAllAlignments2FA <- function(aln_path = here("data/rawdata_aln/"), # accessions <- c("P12345","Q9UHC1","O15530","Q14624","P0DTD1") # accessions <- rep("ANY95992.1", 201) -#' acc2FA converts protein accession numbers to a fasta format. +#' acc2FA #' #' @description -#' Resulting fasta file is written to the outpath. +#' converts protein accession numbers to a fasta format. Resulting +#' fasta file is written to the outpath. #' #' @author Samuel Chen, Janani Ravi #' @keywords accnum, fasta @@ -539,6 +546,9 @@ acc2FA <- function(accessions, outpath, plan = "sequential") { return(result) } +#' createRepresentativeAccNum +#' +#' @description #' Function to generate a vector of one Accession number per distinct observation from 'reduced' column #' #' @author Samuel Chen, Janani Ravi @@ -556,7 +566,7 @@ acc2FA <- function(accessions, outpath, plan = "sequential") { #' @export #' #' @examples -RepresentativeAccNums <- function(prot_data, +createRepresentativeAccNum <- function(prot_data, reduced = "Lineage", accnum_col = "AccNum") { # Get Unique reduced column and then bind the AccNums back to get one AccNum per reduced column @@ -585,6 +595,9 @@ RepresentativeAccNums <- function(prot_data, return(accessions) } +#' alignFasta +#' +#' @description #' Perform a Multiple Sequence Alignment on a FASTA file. #' #' @author Samuel Chen, Janani Ravi diff --git a/R/pre-msa-tree.R b/R/pre-msa-tree.R index fed495f4..290a1644 100644 --- a/R/pre-msa-tree.R +++ b/R/pre-msa-tree.R @@ -49,7 +49,7 @@ api_key <- Sys.getenv("ENTREZ_API_KEY", unset = "YOUR_KEY_HERE") #' @export #' #' @examples -to_titlecase <- function(x, y = " ") { +convert2TitleCase <- function(x, y = " ") { s <- strsplit(x, y)[[1]] paste(toupper(substring(s, 1, 1)), substring(s, 2), sep = "", collapse = y @@ -59,7 +59,7 @@ to_titlecase <- function(x, y = " ") { ################################ ## Function to add leaves to an alignment file ## !! Add DA to leaves? 
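The renamed helpers above keep their original signatures, so the existing pre-MSA workflow still chains together under the new camelCase names. A minimal usage sketch, assuming a `prot_data` data frame with `AccNum` and `Lineage` columns and hypothetical output paths (not part of this PR):

    # one representative accession per lineage, fetched, aligned, and written out
    reps <- createRepresentativeAccNum(prot_data, reduced = "Lineage", accnum_col = "AccNum")
    acc2FA(reps, outpath = "reps.fa", plan = "sequential")
    aln <- alignFasta("reps.fa", tool = "Muscle")   # MsaAAMultipleAlignment object
    writeMSA_AA2FA(aln, outpath = "reps_aln.fa")    # aligned FASTA on disk
    getAccNumFromFA("reps_aln.fa")                  # accession numbers from the FASTA headers
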
-#' Adding Leaves to an alignment file w/ accessions +#' addLeaves2Alignment #' #' @author Janani Ravi #' @@ -95,9 +95,9 @@ to_titlecase <- function(x, y = " ") { #' #' @examples #' \dontrun{ -#' add_leaves("pspa_snf7.aln", "pspa.txt") +#' addLeaves2Alignment("pspa_snf7.aln", "pspa.txt") #' } -add_leaves <- function(aln_file = "", +addLeaves2Alignment <- function(aln_file = "", lin_file = "data/rawdata_tsv/all_semiclean.txt", # !! finally change to all_clean.txt!! # lin_file="data/rawdata_tsv/PspA.txt", reduced = FALSE) { @@ -184,7 +184,7 @@ add_leaves <- function(aln_file = "", } -#' Title +#' addName #' #' @author Samuel Chen, Janani Ravi #' @@ -209,7 +209,7 @@ add_leaves <- function(aln_file = "", #' @export #' #' @examples -add_name <- function(data, +addName <- function(data, accnum_col = "AccNum", spec_col = "Species", lin_col = "Lineage", lin_sep = ">", out_col = "Name") { cols <- c(accnum_col, "Kingdom", "Phylum", "Genus", "Spp") @@ -258,7 +258,7 @@ add_name <- function(data, ################################ ## Function to convert alignment 'aln' to fasta format for MSA + Tree -#' Adding Leaves to an alignment file w/ accessions +#' convertAlignment2FA #' #' @author Janani Ravi #' @@ -288,9 +288,9 @@ add_name <- function(data, #' #' @examples #' \dontrun{ -#' add_leaves("pspa_snf7.aln", "pspa.txt") +#' convertAlignment2FA("pspa_snf7.aln", "pspa.txt") #' } -convert_aln2fa <- function(aln_file = "", +convertAlignment2FA <- function(aln_file = "", lin_file = "data/rawdata_tsv/all_semiclean.txt", # !! finally change to all_clean.txt!! fa_outpath = "", reduced = FALSE) { @@ -324,6 +324,9 @@ convert_aln2fa <- function(aln_file = "", return(fasta) } +#' mapAcc2Name +#' +#' @description #' Default rename_fasta() replacement function. Maps an accession number to its name #' #' @param line he line of a fasta file starting with '>' @@ -340,7 +343,7 @@ convert_aln2fa <- function(aln_file = "", #' @export #' #' @examples -map_acc2name <- function(line, acc2name, acc_col = "AccNum", name_col = "Name") { +mapAcc2Name <- function(line, acc2name, acc_col = "AccNum", name_col = "Name") { # change to be the name equivalent to an add_names column # Find the first ' ' end_acc <- str_locate(line, " ")[[1]] @@ -386,7 +389,10 @@ rename_fasta <- function(fa_path, outpath, } ################################ -## generate_all_aln2fa +## generateAllAlignments2FA +#' generateAllAlignments2FA +#' +#' @description #' Adding Leaves to an alignment file w/ accessions #' #' @author Janani Ravi @@ -413,9 +419,9 @@ rename_fasta <- function(fa_path, outpath, #' #' @examples #' \dontrun{ -#' generate_all_aln2fa() +#' generateAllAlignments2FA() #' } -generate_all_aln2fa <- function(aln_path = here("data/rawdata_aln/"), +generateAllAlignments2FA <- function(aln_path = here("data/rawdata_aln/"), fa_outpath = here("data/alns/"), lin_file = here("data/rawdata_tsv/all_semiclean.txt"), reduced = F) { @@ -448,6 +454,10 @@ generate_all_aln2fa <- function(aln_path = here("data/rawdata_aln/"), # accessions <- rep("ANY95992.1", 201) #' acc2fa #' +#' @description +#' converts protein accession numbers to a fasta format. Resulting +#' fasta file is written to the outpath. 
+#' #' @author Samuel Chen, Janani Ravi #' @keywords accnum, fasta #' diff --git a/man/RepresentativeAccNums.Rd b/man/RepresentativeAccNums.Rd deleted file mode 100644 index 57d1f1ab..00000000 --- a/man/RepresentativeAccNums.Rd +++ /dev/null @@ -1,23 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/CHANGED-pre-msa-tree.R -\name{RepresentativeAccNums} -\alias{RepresentativeAccNums} -\title{Function to generate a vector of one Accession number per distinct observation from 'reduced' column} -\usage{ -RepresentativeAccNums(prot_data, reduced = "Lineage", accnum_col = "AccNum") -} -\arguments{ -\item{prot_data}{Data frame containing Accession Numbers} - -\item{reduced}{Column from prot_data from which distinct observations -will be generated from. -One accession number will be assigned for each of these observations} - -\item{accnum_col}{Column from prot_data that contains Accession Numbers} -} -\description{ -Function to generate a vector of one Accession number per distinct observation from 'reduced' column -} -\author{ -Samuel Chen, Janani Ravi -} diff --git a/man/acc2fa.Rd b/man/acc2fa.Rd index 158b2d51..3e7a756d 100644 --- a/man/acc2fa.Rd +++ b/man/acc2fa.Rd @@ -15,6 +15,9 @@ Function may not work for vectors of length > 10,000} \item{plan}{} } \description{ +converts protein accession numbers to a fasta format. Resulting +fasta file is written to the outpath. + acc2fa converts protein accession numbers to a fasta format. Resulting fasta file is written to the outpath. } diff --git a/man/addLeaves2Alignment.Rd b/man/addLeaves2Alignment.Rd index a758ebd5..d00e6df7 100644 --- a/man/addLeaves2Alignment.Rd +++ b/man/addLeaves2Alignment.Rd @@ -1,9 +1,15 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/CHANGED-pre-msa-tree.R +% Please edit documentation in R/CHANGED-pre-msa-tree.R, R/pre-msa-tree.R \name{addLeaves2Alignment} \alias{addLeaves2Alignment} -\title{Adding Leaves to an alignment file w/ accessions} +\title{addLeaves2Alignment} \usage{ +addLeaves2Alignment( + aln_file = "", + lin_file = "data/rawdata_tsv/all_semiclean.txt", + reduced = FALSE +) + addLeaves2Alignment( aln_file = "", lin_file = "data/rawdata_tsv/all_semiclean.txt", @@ -11,7 +17,7 @@ addLeaves2Alignment( ) } \arguments{ -\item{aln_file}{haracter. Path to file. Input tab-delimited file + +\item{aln_file}{Character. Path to file. Input tab-delimited file + alignment file accnum & alignment. Default is 'pspa_snf7.aln'} @@ -23,15 +29,25 @@ Default is 'pspa.txt'} only one sequence per lineage. Default is FALSE.} } \description{ +Adding Leaves to an alignment file w/ accessions +Genomic Contexts vs Domain Architectures. + Adding Leaves to an alignment file w/ accessions Genomic Contexts vs Domain Architectures. } \details{ +The alignment file would need two columns: 1. accession + +number and 2. alignment. The protein homolog accession to lineage mapping + +file should have + The alignment file would need two columns: 1. accession + number and 2. alignment. The protein homolog accession to lineage mapping + file should have } \note{ +Please refer to the source code if you have alternate + +file formats and/or column names. + Please refer to the source code if you have alternate + file formats and/or column names. } @@ -39,6 +55,9 @@ file formats and/or column names. 
\dontrun{ addLeaves2Alignment("pspa_snf7.aln", "pspa.txt") } +\dontrun{ +addLeaves2Alignment("pspa_snf7.aln", "pspa.txt") +} } \author{ Janani Ravi diff --git a/man/addName.Rd b/man/addName.Rd index e04f9849..6f171456 100644 --- a/man/addName.Rd +++ b/man/addName.Rd @@ -1,9 +1,18 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/CHANGED-pre-msa-tree.R +% Please edit documentation in R/CHANGED-pre-msa-tree.R, R/pre-msa-tree.R \name{addName} \alias{addName} -\title{Add Name} +\title{addName} \usage{ +addName( + data, + accnum_col = "AccNum", + spec_col = "Species", + lin_col = "Lineage", + lin_sep = ">", + out_col = "Name" +) + addName( data, accnum_col = "AccNum", @@ -28,9 +37,14 @@ addName( Lineage, and AccNum info} } \value{ +Original data with a 'Name' column + Original data with a 'Name' column } \description{ +This function adds a new 'Name' column that is comprised of components from +Kingdom, Phylum, Genus, and species, as well as the accession + This function adds a new 'Name' column that is comprised of components from Kingdom, Phylum, Genus, and species, as well as the accession } diff --git a/man/add_leaves.Rd b/man/add_leaves.Rd deleted file mode 100644 index f1eeed10..00000000 --- a/man/add_leaves.Rd +++ /dev/null @@ -1,50 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/pre-msa-tree.R -\name{add_leaves} -\alias{add_leaves} -\title{Adding Leaves to an alignment file w/ accessions} -\usage{ -add_leaves( - aln_file = "", - lin_file = "data/rawdata_tsv/all_semiclean.txt", - reduced = FALSE -) -} -\arguments{ -\item{aln_file}{Character. Path to file. Input tab-delimited file + -alignment file accnum & alignment. -Default is 'pspa_snf7.aln'} - -\item{lin_file}{Character. Path to file. Protein file with accession + -number to lineage mapping. -Default is 'pspa.txt'} - -\item{reduced}{Boolean. If TRUE, a reduced data frame will be generated with -only one sequence per lineage. Default is FALSE.} -} -\description{ -Adding Leaves to an alignment file w/ accessions -Genomic Contexts vs Domain Architectures. -} -\details{ -The alignment file would need two columns: 1. accession + -number and 2. alignment. The protein homolog accession to lineage mapping + -file should have -} -\note{ -Please refer to the source code if you have alternate + -file formats and/or column names. 
-} -\examples{ -\dontrun{ -add_leaves("pspa_snf7.aln", "pspa.txt") -} -} -\author{ -Janani Ravi -} -\keyword{accnum,} -\keyword{alignment,} -\keyword{leaves,} -\keyword{lineage,} -\keyword{species} diff --git a/man/add_name.Rd b/man/add_name.Rd deleted file mode 100644 index f19139e1..00000000 --- a/man/add_name.Rd +++ /dev/null @@ -1,39 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/pre-msa-tree.R -\name{add_name} -\alias{add_name} -\title{Title} -\usage{ -add_name( - data, - accnum_col = "AccNum", - spec_col = "Species", - lin_col = "Lineage", - lin_sep = ">", - out_col = "Name" -) -} -\arguments{ -\item{data}{Data to add name column to} - -\item{accnum_col}{Column containing accession numbers} - -\item{spec_col}{Column containing species} - -\item{lin_col}{Column containing lineage} - -\item{lin_sep}{Character separating lineage levels} - -\item{out_col}{Column that contains the new 'Name' derived from Species, -Lineage, and AccNum info} -} -\value{ -Original data with a 'Name' column -} -\description{ -This function adds a new 'Name' column that is comprised of components from -Kingdom, Phylum, Genus, and species, as well as the accession -} -\author{ -Samuel Chen, Janani Ravi -} diff --git a/man/alignFasta.Rd b/man/alignFasta.Rd index 21b020cf..02a3026b 100644 --- a/man/alignFasta.Rd +++ b/man/alignFasta.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/CHANGED-pre-msa-tree.R, R/pre-msa-tree.R \name{alignFasta} \alias{alignFasta} -\title{Perform a Multiple Sequence Alignment on a FASTA file.} +\title{alignFasta} \usage{ alignFasta(fasta_file, tool = "Muscle", outpath = NULL) @@ -21,6 +21,8 @@ aligned fasta sequence as a MsaAAMultipleAlignment object aligned fasta sequence as a MsaAAMultipleAlignment object } \description{ +Perform a Multiple Sequence Alignment on a FASTA file. + Perform a Multiple Sequence Alignment on a FASTA file. } \author{ diff --git a/man/convert2TitleCase.Rd b/man/convert2TitleCase.Rd index 84e7fa00..72619285 100644 --- a/man/convert2TitleCase.Rd +++ b/man/convert2TitleCase.Rd @@ -1,5 +1,5 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/CHANGED-pre-msa-tree.R +% Please edit documentation in R/CHANGED-pre-msa-tree.R, R/pre-msa-tree.R \name{convert2TitleCase} \alias{convert2TitleCase} \alias{totitle,} @@ -7,6 +7,8 @@ \title{Changing case to 'Title Case'} \usage{ convert2TitleCase(text, delimitter) + +to_titlecase(text, delimitter) } \arguments{ \item{x}{Character vector.} @@ -15,8 +17,13 @@ convert2TitleCase(text, delimitter) } \description{ Translate string to Title Case w/ delimitter. + +Translate string to Title Case w/ delimitter. +Changing case to 'Title Case' } \seealso{ +chartr, toupper, and tolower. + chartr, toupper, and tolower. 
} \author{ diff --git a/man/convertAlignment2FA.Rd b/man/convertAlignment2FA.Rd index d6b4dc56..8e9ceb94 100644 --- a/man/convertAlignment2FA.Rd +++ b/man/convertAlignment2FA.Rd @@ -1,9 +1,16 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/CHANGED-pre-msa-tree.R +% Please edit documentation in R/CHANGED-pre-msa-tree.R, R/pre-msa-tree.R \name{convertAlignment2FA} \alias{convertAlignment2FA} -\title{Adding Leaves to an alignment file w/ accessions} +\title{convertAlignment2FA} \usage{ +convertAlignment2FA( + aln_file = "", + lin_file = "data/rawdata_tsv/all_semiclean.txt", + fa_outpath = "", + reduced = FALSE +) + convertAlignment2FA( aln_file = "", lin_file = "data/rawdata_tsv/all_semiclean.txt", @@ -31,11 +38,18 @@ Adding Leaves to an alignment file w/ accessions Genomic Contexts vs Domain Architectures. } \details{ +The alignment file would need two columns: 1. accession + +number and 2. alignment. The protein homolog accession to lineage mapping + +file should have + The alignment file would need two columns: 1. accession + number and 2. alignment. The protein homolog accession to lineage mapping + file should have } \note{ +Please refer to the source code if you have alternate + +file formats and/or column names. + Please refer to the source code if you have alternate + file formats and/or column names. } @@ -44,6 +58,9 @@ file formats and/or column names. addLeaves2Alignment("pspa_snf7.aln", "pspa.txt") } +\dontrun{ +convertAlignment2FA("pspa_snf7.aln", "pspa.txt") +} } \author{ Janani Ravi diff --git a/man/convert_aln2fa.Rd b/man/convert_aln2fa.Rd deleted file mode 100644 index 8bebe31d..00000000 --- a/man/convert_aln2fa.Rd +++ /dev/null @@ -1,53 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/pre-msa-tree.R -\name{convert_aln2fa} -\alias{convert_aln2fa} -\title{Adding Leaves to an alignment file w/ accessions} -\usage{ -convert_aln2fa( - aln_file = "", - lin_file = "data/rawdata_tsv/all_semiclean.txt", - fa_outpath = "", - reduced = FALSE -) -} -\arguments{ -\item{aln_file}{Character. Path to file. Input tab-delimited file + -alignment file accnum & alignment. -Default is 'pspa_snf7.aln'} - -\item{lin_file}{Character. Path to file. Protein file with accession + -number to lineage mapping. -Default is 'pspa.txt'} - -\item{fa_outpath}{Character. Path to the written fasta file. -Default is 'NULL'} - -\item{reduced}{Boolean. If TRUE, the fasta file will contain only one sequence per lineage. -Default is 'FALSE'} -} -\description{ -Adding Leaves to an alignment file w/ accessions -} -\details{ -The alignment file would need two columns: 1. accession + -number and 2. alignment. The protein homolog accession to lineage mapping + -file should have -} -\note{ -Please refer to the source code if you have alternate + -file formats and/or column names. 
-} -\examples{ -\dontrun{ -add_leaves("pspa_snf7.aln", "pspa.txt") -} -} -\author{ -Janani Ravi -} -\keyword{accnum,} -\keyword{alignment,} -\keyword{leaves,} -\keyword{lineage,} -\keyword{species} diff --git a/man/createRepresentativeAccNum.Rd b/man/createRepresentativeAccNum.Rd index 3703fe1a..3bd20522 100644 --- a/man/createRepresentativeAccNum.Rd +++ b/man/createRepresentativeAccNum.Rd @@ -1,9 +1,15 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/pre-msa-tree.R +% Please edit documentation in R/CHANGED-pre-msa-tree.R, R/pre-msa-tree.R \name{createRepresentativeAccNum} \alias{createRepresentativeAccNum} \title{createRepresentativeAccNum} \usage{ +createRepresentativeAccNum( + prot_data, + reduced = "Lineage", + accnum_col = "AccNum" +) + createRepresentativeAccNum( prot_data, reduced = "Lineage", @@ -20,6 +26,8 @@ One accession number will be assigned for each of these observations} \item{accnum_col}{Column from prot_data that contains Accession Numbers} } \description{ +Function to generate a vector of one Accession number per distinct observation from 'reduced' column + Function to generate a vector of one Accession number per distinct observation from 'reduced' column } \author{ diff --git a/man/generateAllAlignments2FA.Rd b/man/generateAllAlignments2FA.Rd index 3bf9938a..8f9d8ffc 100644 --- a/man/generateAllAlignments2FA.Rd +++ b/man/generateAllAlignments2FA.Rd @@ -1,9 +1,16 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/CHANGED-pre-msa-tree.R +% Please edit documentation in R/CHANGED-pre-msa-tree.R, R/pre-msa-tree.R \name{generateAllAlignments2FA} \alias{generateAllAlignments2FA} -\title{Adding Leaves to an alignment file w/ accessions} +\title{generateAllAlignments2FA} \usage{ +generateAllAlignments2FA( + aln_path = here("data/rawdata_aln/"), + fa_outpath = here("data/alns/"), + lin_file = here("data/rawdata_tsv/all_semiclean.txt"), + reduced = F +) + generateAllAlignments2FA( aln_path = here("data/rawdata_aln/"), fa_outpath = here("data/alns/"), @@ -15,28 +22,44 @@ generateAllAlignments2FA( \item{aln_path}{Character. Path to alignment files. Default is 'here("data/rawdata_aln/")'} -\item{fa_outpath}{Character. Path to file. Master protein file with AccNum & lineages. -Default is 'here("data/rawdata_tsv/all_semiclean.txt")'} - -\item{lin_file}{Character. Path to the written fasta file. +\item{fa_outpath}{Character. Path to the written fasta file. Default is 'here("data/alns/")'.} +\item{lin_file}{Character. Path to file. Master protein file with AccNum & lineages. +Default is 'here("data/rawdata_tsv/all_semiclean.txt")'} + \item{reduced}{Boolean. If TRUE, the fasta file will contain only one sequence per lineage. Default is 'FALSE'.} } \description{ +Adding Leaves to an alignment file w/ accessions + +Adding Leaves to all alignment files w/ accessions & DAs? + +Adding Leaves to an alignment file w/ accessions + Adding Leaves to all alignment files w/ accessions & DAs? } \details{ +The alignment files would need two columns separated by spaces: 1. AccNum and 2. alignment. The protein homolog file should have AccNum, Species, Lineages. + The alignment files would need two columns separated by spaces: 1. AccNum and 2. alignment. The protein homolog file should have AccNum, Species, Lineages. } \note{ +Please refer to the source code if you have alternate + file formats and/or column names. + Please refer to the source code if you have alternate + file formats and/or column names. 
} \examples{ \dontrun{ generateAllAlignments2FA() } +\dontrun{ +generateAllAlignments2FA() +} +} +\author{ +Janani Ravi } \keyword{accnum,} \keyword{alignment,} diff --git a/man/generate_all_aln2fa.Rd b/man/generate_all_aln2fa.Rd deleted file mode 100644 index ad6b7136..00000000 --- a/man/generate_all_aln2fa.Rd +++ /dev/null @@ -1,48 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/pre-msa-tree.R -\name{generate_all_aln2fa} -\alias{generate_all_aln2fa} -\title{Adding Leaves to an alignment file w/ accessions} -\usage{ -generate_all_aln2fa( - aln_path = here("data/rawdata_aln/"), - fa_outpath = here("data/alns/"), - lin_file = here("data/rawdata_tsv/all_semiclean.txt"), - reduced = F -) -} -\arguments{ -\item{aln_path}{Character. Path to alignment files. -Default is 'here("data/rawdata_aln/")'} - -\item{fa_outpath}{Character. Path to the written fasta file. -Default is 'here("data/alns/")'.} - -\item{lin_file}{Character. Path to file. Master protein file with AccNum & lineages. -Default is 'here("data/rawdata_tsv/all_semiclean.txt")'} - -\item{reduced}{Boolean. If TRUE, the fasta file will contain only one sequence per lineage. -Default is 'FALSE'.} -} -\description{ -Adding Leaves to all alignment files w/ accessions & DAs? -} -\details{ -The alignment files would need two columns separated by spaces: 1. AccNum and 2. alignment. The protein homolog file should have AccNum, Species, Lineages. -} -\note{ -Please refer to the source code if you have alternate + file formats and/or column names. -} -\examples{ -\dontrun{ -generate_all_aln2fa() -} -} -\author{ -Janani Ravi -} -\keyword{accnum,} -\keyword{alignment,} -\keyword{leaves,} -\keyword{lineage,} -\keyword{species} diff --git a/man/mapAcc2Name.Rd b/man/mapAcc2Name.Rd index 0f5d447d..39ecb065 100644 --- a/man/mapAcc2Name.Rd +++ b/man/mapAcc2Name.Rd @@ -1,13 +1,15 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/CHANGED-pre-msa-tree.R +% Please edit documentation in R/CHANGED-pre-msa-tree.R, R/pre-msa-tree.R \name{mapAcc2Name} \alias{mapAcc2Name} -\title{Default renameFA() replacement function. Maps an accession number to its name} +\title{mapAcc2Name} \usage{ +mapAcc2Name(line, acc2name, acc_col = "AccNum", name_col = "Name") + mapAcc2Name(line, acc2name, acc_col = "AccNum", name_col = "Name") } \arguments{ -\item{line}{The line of a fasta file starting with '>'} +\item{line}{he line of a fasta file starting with '>'} \item{acc2name}{Data Table containing a column of accession numbers and a name column} @@ -18,4 +20,6 @@ are mapped to} } \description{ Default renameFA() replacement function. Maps an accession number to its name + +Default rename_fasta() replacement function. Maps an accession number to its name } diff --git a/man/map_acc2name.Rd b/man/map_acc2name.Rd deleted file mode 100644 index fcdb3023..00000000 --- a/man/map_acc2name.Rd +++ /dev/null @@ -1,21 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/pre-msa-tree.R -\name{map_acc2name} -\alias{map_acc2name} -\title{Default rename_fasta() replacement function. 
Maps an accession number to its name} -\usage{ -map_acc2name(line, acc2name, acc_col = "AccNum", name_col = "Name") -} -\arguments{ -\item{line}{he line of a fasta file starting with '>'} - -\item{acc2name}{Data Table containing a column of accession numbers and a name column} - -\item{acc_col}{Name of the column containing Accession numbers} - -\item{name_col}{Name of the column containing the names that the accession numbers -are mapped to} -} -\description{ -Default rename_fasta() replacement function. Maps an accession number to its name -} diff --git a/man/to_titlecase.Rd b/man/to_titlecase.Rd deleted file mode 100644 index 45139d3b..00000000 --- a/man/to_titlecase.Rd +++ /dev/null @@ -1,25 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/pre-msa-tree.R -\name{to_titlecase} -\alias{to_titlecase} -\alias{totitle,} -\alias{to_title} -\title{To Titlecase} -\usage{ -to_titlecase(text, delimitter) -} -\arguments{ -\item{x}{Character vector.} - -\item{y}{Delimitter. Default is space (" ").} -} -\description{ -Translate string to Title Case w/ delimitter. -Changing case to 'Title Case' -} -\seealso{ -chartr, toupper, and tolower. -} -\author{ -Andrie, Janani Ravi -} From e9460610fb054c1c3109cf728561efe2e6619104 Mon Sep 17 00:00:00 2001 From: David Mayer Date: Sat, 12 Oct 2024 14:09:40 -0600 Subject: [PATCH 33/41] remove outdated .Rd --- man/GCA2lin.Rd | 0 man/acc2lin.Rd | 57 ----------------------------------------------- man/efetch_ipg.Rd | 0 man/ipg2lin.Rd | 0 man/sink.reset.Rd | 0 5 files changed, 57 deletions(-) delete mode 100644 man/GCA2lin.Rd delete mode 100644 man/acc2lin.Rd delete mode 100644 man/efetch_ipg.Rd delete mode 100644 man/ipg2lin.Rd delete mode 100644 man/sink.reset.Rd diff --git a/man/GCA2lin.Rd b/man/GCA2lin.Rd deleted file mode 100644 index e69de29b..00000000 diff --git a/man/acc2lin.Rd b/man/acc2lin.Rd deleted file mode 100644 index d3f2468b..00000000 --- a/man/acc2lin.Rd +++ /dev/null @@ -1,57 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/acc2lin.R, R/lineage.R -\name{acc2lin} -\alias{acc2lin} -\title{acc2lin} -\usage{ -acc2lin( - accessions, - assembly_path, - lineagelookup_path, - ipgout_path = NULL, - plan = "multicore" -) - -acc2lin( - accessions, - assembly_path, - lineagelookup_path, - ipgout_path = NULL, - plan = "multicore" -) -} -\arguments{ -\item{accessions}{Character vector of protein accessions} - -\item{assembly_path}{String of the path to the assembly_summary path -This file can be generated using the "DownloadAssemblySummary()" function} - -\item{lineagelookup_path}{String of the path to the lineage lookup file -(taxid to lineage mapping). This file can be generated using the} - -\item{ipgout_path}{Path to write the results of the efetch run of the accessions -on the ipg database. If NULL, the file will not be written. Defaults to NULL} - -\item{plan}{} -} -\value{ -Describe return, in detail -} -\description{ -This function combines 'efetch_ipg()' -and 'ipg2lin()' to map a set -of protein accessions to their assembly (GCA_ID), tax ID, and lineage. - -Function to map protein accession numbers to lineage - -This function combines 'efetch_ipg()' and 'ipg2lin()' to map a set -of protein accessions to their assembly (GCA_ID), tax ID, and lineage. 
-} -\examples{ -\dontrun{ -acc2lin() -} -} -\author{ -Samuel Chen, Janani Ravi -} diff --git a/man/efetch_ipg.Rd b/man/efetch_ipg.Rd deleted file mode 100644 index e69de29b..00000000 diff --git a/man/ipg2lin.Rd b/man/ipg2lin.Rd deleted file mode 100644 index e69de29b..00000000 diff --git a/man/sink.reset.Rd b/man/sink.reset.Rd deleted file mode 100644 index e69de29b..00000000 From 9571333c44ac879d9b2b6bc1a38d454fdda69a39 Mon Sep 17 00:00:00 2001 From: David Mayer Date: Sat, 12 Oct 2024 14:10:10 -0600 Subject: [PATCH 34/41] let R sort NAMESPACE --- NAMESPACE | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/NAMESPACE b/NAMESPACE index 60bec5b1..c448ff13 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -11,9 +11,7 @@ export(addLeaves2Alignment) export(addLineage) export(addName) export(addTaxID) -export(advanced_opts2est_walltime) export(alignFasta) -export(assert_count_df) export(assignJobQueue) export(calculateEstimatedWallTimeFromOpts) export(calculateProcessRuntime) @@ -35,9 +33,9 @@ export(countByColumn) export(createFA2Tree) export(createJobResultsURL) export(createJobStatusEmailMessage) +export(createLineageLookup) export(createRepresentativeAccNum) export(createWordCloud2Element) -export(createLineageLookup) export(createWordCloudElement) export(domain_network) export(downloadAssemblySummary) @@ -50,14 +48,14 @@ export(formatJobArgumentsHTML) export(gc_undirected_network) export(generateAllAlignments2FA) export(generate_msa) -export(getProcessRuntimeWeights) export(getAccNumFromFA) +export(getProcessRuntimeWeights) export(getTopAccByLinDomArch) export(mapAcc2Name) export(mapAdvOption2Process) export(mapOption2Process) -export(map_acc2name) export(msa_pdf) +export(plotEstimatedWallTimes) export(plotIPR2Viz) export(plotIPR2VizWeb) export(plotLineageDA) @@ -70,12 +68,10 @@ export(plotStackedLineage) export(plotSunburst) export(plotTreemap) export(plotUpSet) -export(plotEstimatedWallTimes) export(prepareColumnParams) export(prepareSingleColumnParams) export(proteinAcc2TaxID) export(proteinAcc2TaxID_old) -export(prot2tax_old) export(removeAsterisks) export(removeEmptyRows) export(removeTails) From 8c573693b92f2aa216b269e24244d2d63fe0d3a9 Mon Sep 17 00:00:00 2001 From: David Mayer Date: Sat, 12 Oct 2024 14:10:26 -0600 Subject: [PATCH 35/41] regen new .Rd --- man/GCA2Lineage.Rd | 2 +- man/IPG2Lineage.Rd | 5 +++-- man/efetchIPG.Rd | 3 ++- man/sinkReset.Rd | 1 + 4 files changed, 7 insertions(+), 4 deletions(-) diff --git a/man/GCA2Lineage.Rd b/man/GCA2Lineage.Rd index 9ec0ce56..9a2a7a30 100644 --- a/man/GCA2Lineage.Rd +++ b/man/GCA2Lineage.Rd @@ -19,7 +19,7 @@ This file can be generated using the "downloadAssemblySummary()" function} \item{lineagelookup_path}{String of the path to the lineage lookup file (taxid to lineage mapping). This file can be generated using the -"create_lineage_lookup()" function} +"createLineageLookup()" function} \item{acc_col}{} } diff --git a/man/IPG2Lineage.Rd b/man/IPG2Lineage.Rd index 282d5cbf..118812ab 100644 --- a/man/IPG2Lineage.Rd +++ b/man/IPG2Lineage.Rd @@ -29,7 +29,7 @@ file} \item{lineagelookup_path}{String of the path to the lineage lookup file (taxid to lineage mapping). 
This file can be generated using the -"create_lineage_lookup()" function} +"createLineageLookup()" function} \item{assembly_path}{String of the path to the assembly_summary path This file can be generated using the \link[MolEvolvR]{downloadAssemblySummary} function} @@ -39,7 +39,8 @@ A \code{data.table} with the lineage information for the provided protein accessions. } \description{ -Takes the resulting file of an efetch run on the ipg database and +Takes the resulting file +of an efetch run on the ipg database and Takes the resulting file of an efetch run on the ipg database and append lineage, and taxid columns diff --git a/man/efetchIPG.Rd b/man/efetchIPG.Rd index 047e2652..db63024f 100644 --- a/man/efetchIPG.Rd +++ b/man/efetchIPG.Rd @@ -23,7 +23,8 @@ the ipg database} No return value. The function writes the fetched results to \code{out_path}. } \description{ -Perform efetch on the ipg database and write the results to out_path +Perform efetch on the ipg database +and write the results to out_path Perform efetch on the ipg database and write the results to out_path } diff --git a/man/sinkReset.Rd b/man/sinkReset.Rd index 0285c0b2..e3fc7ce4 100644 --- a/man/sinkReset.Rd +++ b/man/sinkReset.Rd @@ -8,6 +8,7 @@ sinkReset() } \value{ No return, but run to close all outstanding \code{sink()}s +and handles any errors or warnings that occur during the process. } \description{ Sink Reset From 2061d7a24b7a699bfeac72270817ae7225365ffa Mon Sep 17 00:00:00 2001 From: David Mayer Date: Sat, 12 Oct 2024 14:10:48 -0600 Subject: [PATCH 36/41] remove old tryCatch code (for now) --- R/acc2lin.R | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/R/acc2lin.R b/R/acc2lin.R index 42315ece..a0a95033 100644 --- a/R/acc2lin.R +++ b/R/acc2lin.R @@ -72,14 +72,6 @@ addLineage <- function(df, acc_col = "AccNum", assembly_path, merged <- merge(df, lins, by.x = acc_col, by.y = "Protein", all.x = TRUE) return(merged) - }, error = function(e) { - print(paste("Error: ", e$message)) - }, warning = function(w) { - print(paste("Warning: ", w$message)) - }, finally = { - print("addLineages function execution completed.") - }) - } @@ -247,13 +239,6 @@ IPG2Lineage <- function(accessions, ipg_file, assembly_path, lineagelookup_path, lins <- lins[!is.na(Lineage)] %>% unique() return(lins) - }, error = function(e) { - print(paste("An error occurred: ", e$message)) - }, warning = function(w) { - print(paste("Warning: ", w$message)) - }, finally = { - print("ipg2lin function execution completed.") - }) } From 70f0de8c57d610eaad122e59d4bf1e96fc455963 Mon Sep 17 00:00:00 2001 From: David Mayer Date: Sun, 13 Oct 2024 19:21:41 -0600 Subject: [PATCH 37/41] remove code not relevant to PR --- R/acc2lin.R | 50 +++--- R/assign_job_queue.R | 359 +++++++++++++------------------------------ R/blastWrappers.R | 105 +++---------- 3 files changed, 153 insertions(+), 361 deletions(-) diff --git a/R/acc2lin.R b/R/acc2lin.R index a0a95033..61aae87c 100644 --- a/R/acc2lin.R +++ b/R/acc2lin.R @@ -157,40 +157,34 @@ efetchIPG <- function(accnums, out_path, plan = "sequential", ...) 
{ return(partitioned) } - tryCatch({ - # Set the future plan strategy - plan(strategy = plan, .skip = T) + # Set the future plan strategy + plan(strategy = plan, .skip = T) - min_groups <- length(accnums) / 200 - groups <- min(max(min_groups, 15), length(accnums)) - partitioned_acc <- partition(accnums, groups) - # Open the sink to the output path - sink(out_path) + min_groups <- length(accnums) / 200 + groups <- min(max(min_groups, 15), length(accnums)) + partitioned_acc <- partition(accnums, groups) - a <- future_map(1:length(partitioned_acc), function(x) { - # Avoid hitting the rate API limit - if (x %% 9 == 0) { - Sys.sleep(1) - } - cat( - entrez_fetch( - id = partitioned_acc[[x]], - db = "ipg", - rettype = "xml", - api_key = "YOUR_KEY_HERE" ## Can this be included in public package? - ) + # Open the sink to the output path + sink(out_path) + + a <- future_map(1:length(partitioned_acc), function(x) { + # Avoid hitting the rate API limit + if (x %% 9 == 0) { + Sys.sleep(1) + } + cat( + entrez_fetch( + id = partitioned_acc[[x]], + db = "ipg", + rettype = "xml", + api_key = "YOUR_KEY_HERE" ## Can this be included in public package? ) - }) - sink(NULL) - }, error = function(e) { - print(paste("An error occurred: ", e$message)) - }, warning = function(w) { - print(paste("Warning: ", w$message)) - }, finally = { - print("efetch_ipg function execution completed.") + ) }) + sink(NULL) + } } diff --git a/R/assign_job_queue.R b/R/assign_job_queue.R index 10df1e3a..4791b4a1 100644 --- a/R/assign_job_queue.R +++ b/R/assign_job_queue.R @@ -13,22 +13,13 @@ common_root <- Sys.getenv("COMMON_SRC_ROOT") #' example: list_opts2procs <- mapOption2Process #' @export mapOption2Process <- function() { - tryCatch({ - opts2processes <- list( - "homology_search" = c("dblast", "dblast_cleanup"), - "domain_architecture" = c("iprscan", "ipr2lineage", "ipr2da"), - # processes always present agnostic of advanced options - "always" = c("blast_clust", "clust2table") - ) - return(opts2processes) - }, error = function(e) { - message(paste("Encountered an error: ", e$message)) - }, warning = function(w) { - message(paste("Warning: ", w$message)) - }, finally = { - message("mapOption2Process function execution completed.") - }) - + opts2processes <- list( + "homology_search" = c("dblast", "dblast_cleanup"), + "domain_architecture" = c("iprscan", "ipr2lineage", "ipr2da"), + # processes always present agnostic of advanced options + "always" = c("blast_clust", "clust2table") + ) + return(opts2processes) } #' Use MolEvolvR advanced options to get associated processes @@ -43,26 +34,14 @@ mapOption2Process <- function() { #' procs <- mapAdvOption2Process(advanced_opts) #' @export mapAdvOption2Process <- function(advanced_opts) { - if (!is.character(advanced_opts)) { - stop("Argument must be a character vector!") - } - tryCatch({ - # append 'always' to add procs that always run - advanced_opts <- c(advanced_opts, "always") - opts2proc <- mapOption2Process() - # setup index for opts2proc based on advanced options - idx <- which(names(opts2proc) %in% advanced_opts) - # extract processes that will run - procs <- opts2proc[idx] |> unlist() - return(procs) - }, error = function(e) { - message(paste("Encountered an error: ", e$message)) - }, warning = function(w) { - message(paste("Warning: ", w$message)) - }, finally = { - message("mapOption2Process function execution completed.") - }) - + # append 'always' to add procs that always run + advanced_opts <- c(advanced_opts, "always") + opts2proc <- mapOption2Process() + # setup index 
for opts2proc based on advanced options + idx <- which(names(opts2proc) %in% advanced_opts) + # extract processes that will run + procs <- opts2proc[idx] |> unlist() + return(procs) } #' Scrape MolEvolvR logs and calculate median processes @@ -88,60 +67,41 @@ mapAdvOption2Process <- function(advanced_opts) { #' list_proc_medians <- calculateProcessRuntime(dir_job_results) #' @export calculateProcessRuntime <- function(dir_job_results) { - tryCatch({ - # Check if dir_job_results is a character string - if (!is.character(dir_job_results) || length(dir_job_results) != 1) { - stop("Input 'dir_job_results' must be a single character string.") - } + source(file.path(common_root, "molevol_scripts", "R", "metrics.R")) - # Check if dir_job_results exists - if (!dir.exists(dir_job_results)) { - stop(paste("The directory", dir_job_results, "does not exist.")) - } + # aggregate logs from + path_log_data <- file.path(common_root, + "molevol_scripts", "log_data", "prod_logs.rda") - source(file.path(common_root, "molevol_scripts", "R", "metrics.R")) - - # aggregate logs from - path_log_data <- file.path(common_root, - "molevol_scripts", "log_data", "prod_logs.rda") - - # ensure the folder exists to the location - if (!dir.exists(path_log_data)) { - dir.create(dirname(path_log_data), - recursive = TRUE, showWarnings = FALSE) - } - - # attempt to load pre-generated logdata - if (!file.exists(path_log_data)) { - logs <- aggregate_logs(dir_job_results, latest_date = Sys.Date() - 60) - save(logs, file = path_log_data) - } else { - load(path_log_data) # loads the logs object - } - df_log <- logs$df_log - procs <- c( - "dblast", "dblast_cleanup", "iprscan", - "ipr2lineage", "ipr2da", "blast_clust", - "clust2table" - ) - list_proc_medians <- df_log |> - dplyr::select(dplyr::all_of(procs)) |> - dplyr::summarise( - dplyr::across( - dplyr::everything(), - \(x) median(x, na.rm = TRUE) - ) - ) |> - as.list() - return(list_proc_medians) - }, error = function(e) { - message(paste("Encountered an error: ", e$message)) - }, warning = function(w) { - message(paste("Warning: ", w$message)) - }, finally = { - message("calculateProcessRuntime function execution completed.") - }) + # ensure the folder exists to the location + if (!dir.exists(path_log_data)) { + dir.create(dirname(path_log_data), + recursive = TRUE, showWarnings = FALSE) + } + # attempt to load pre-generated logdata + if (!file.exists(path_log_data)) { + logs <- aggregate_logs(dir_job_results, latest_date = Sys.Date() - 60) + save(logs, file = path_log_data) + } else { + load(path_log_data) # loads the logs object + } + df_log <- logs$df_log + procs <- c( + "dblast", "dblast_cleanup", "iprscan", + "ipr2lineage", "ipr2da", "blast_clust", + "clust2table" + ) + list_proc_medians <- df_log |> + dplyr::select(dplyr::all_of(procs)) |> + dplyr::summarise( + dplyr::across( + dplyr::everything(), + \(x) median(x, na.rm = TRUE) + ) + ) |> + as.list() + return(list_proc_medians) } #' Write a table of 2 columns: 1) process and 2) median seconds @@ -162,39 +122,18 @@ calculateProcessRuntime <- function(dir_job_results) { #' ) #' @export writeProcessRuntime2TSV <- function(dir_job_results, filepath) { - tryCatch({ - # Error handling for input arguments - if (!is.character(dir_job_results) || length(dir_job_results) != 1) { - stop("Input 'dir_job_results' must be a single character string.") - } - - if (!dir.exists(dir_job_results)) { - stop(paste("The directory", dir_job_results, "does not exist.")) - } - - if (!is.character(filepath) || length(filepath) != 1) { - 
stop("Input 'filepath' must be a single character string.") - } - df_proc_medians <- calculateProcessRuntime(dir_job_results) |> - tibble::as_tibble() |> - tidyr::pivot_longer( - dplyr::everything(), - names_to = "process", - values_to = "median_seconds" - ) |> - dplyr::arrange(dplyr::desc(median_seconds)) - - # Write the resulting tibble to a TSV file - readr::write_tsv(df_proc_medians, file = filepath) - return(df_proc_medians) - }, error = function(e) { - message(paste("Encountered an error: ", e$message)) - }, warning = function(w) { - message(paste("Warning: ", w$message)) - }, finally = { - message("writeProcessRuntime2TSV function execution completed.") - }) - + df_proc_medians <- calculateProcessRuntime(dir_job_results) |> + tibble::as_tibble() |> + tidyr::pivot_longer( + dplyr::everything(), + names_to = "process", + values_to = "median_seconds" + ) |> + dplyr::arrange(dplyr::desc(median_seconds)) + + # Write the resulting tibble to a TSV file + readr::write_tsv(df_proc_medians, file = filepath) + return(df_proc_medians) } #' Compute median process runtimes, then write a YAML list of the processes and @@ -219,36 +158,8 @@ writeProcessRuntime2TSV <- function(dir_job_results, filepath) { #' } #' @export writeProcessRuntime2YML <- function(dir_job_results, filepath = NULL) { - tryCatch({ - # Error handling for dir_job_results arguments - if (!is.character(dir_job_results) || length(dir_job_results) != 1) { - stop("Input 'dir_job_results' must be a single character string.") - } - - if (!dir.exists(dir_job_results)) { - stop(paste("The directory", dir_job_results, "does not exist.")) - } - if (is.null(filepath)) { - filepath <- file.path(common_root, - "molevol_scripts", - "log_data", - "job_proc_weights.yml") - } - if (!is.character(filepath) || length(filepath) != 1) { - stop("Input 'filepath' must be a single character string.") - } - - medians <- calculateProcessRuntime(dir_job_results) - yaml::write_yaml(medians, filepath) - }, error = function(e) { - message(paste("Encountered an error: "), e$message) - }, warning = function(w) { - message(paste("Warning: "), w$message) - }, finally = { - message("writeProcessRuntime2TSV function execution completed.") - } - ) - + medians <- calculateProcessRuntime(dir_job_results) + yaml::write_yaml(medians, filepath) } #' Quickly get the runtime weights for MolEvolvR backend processes @@ -322,81 +233,49 @@ calculateEstimatedWallTimeFromOpts <- function(advanced_opts, n_inputs = 1L, n_hits = NULL, verbose = FALSE) { - - tryCatch({ - # to calculate est walltime for a homology search job, the number of hits - # must be provided - validation_fail <- is.null(n_hits) && "homology_search" %in% advanced_opts - stopifnot(!validation_fail) - - # Validate advanced_opts - if (!is.character(advanced_opts)) { - stop("Argument 'advanced_opts' must be a character vector.") - } - - # Validate n_inputs - if (!is.numeric(n_inputs) || length(n_inputs) != 1 || n_inputs <= 0) { - stop("Argument 'n_inputs' must be a single positive numeric value.") - } - - # Validate n_hits if homology_search is in advanced_opts - if ("homology_search" %in% advanced_opts && - (is.null(n_hits)|| !is.numeric(n_hits) - || length(n_hits) != 1 || n_hits < 0)) { - stop("Argument 'n_hits' must be a single non-negative numeric value when - 'homology_search' is in 'advanced_opts'.") - } - - # Get process weights - proc_weights <- writeProcessRuntime2YML() - if (!is.list(proc_weights)) { - stop("Process weights could not be retrieved correctly.") - } - - # sort process weights by names and 
convert to vec - proc_weights <- proc_weights[order(names(proc_weights))] |> unlist() - all_procs <- names(proc_weights) |> sort() - # get processes from advanced options and sort by names - procs_from_opts <- mapAdvOption2Process(advanced_opts) - procs_from_opts <- sort(procs_from_opts) - # binary encode: yes proc will run (1); else 0 - binary_proc_vec <- dplyr::if_else(all_procs %in% procs_from_opts, 1L, 0L) - # dot product of weights and procs to run; scaled by the number of inputs - est_walltime <- (n_inputs * (binary_proc_vec %*% proc_weights)) |> - as.numeric() - # calculate the additional processes to run for the homologous hits - if ("homology_search" %in% advanced_opts) { - opts2procs <- mapOption2Process() - # exclude the homology search processes for the homologous hits - procs2exclude_for_homologs <- opts2procs[["homology_search"]] - procs_homologs <- procs_from_opts[!(procs_from_opts - %in% procs2exclude_for_homologs)] - binary_proc_vec_homolog <- dplyr::if_else(all_procs - %in% procs_homologs, 1L, 0L) - # add the estimated walltime for processes run on the homologous hits - est_walltime <- est_walltime + - (n_hits * (binary_proc_vec_homolog - %*% proc_weights) |> as.numeric()) - } - if (verbose) { - msg <- stringr::str_glue( - "warnings from calculateEstimatedWallTimeFromOpts ():\n", - "\tn_inputs={n_inputs}\n", - "\tn_hits={ifelse(is.null(n_hits), 'null', n_hits)}\n", - "\test_walltime={est_walltime}\n\n" - ) - cat(file = stderr(), msg) - } - return(est_walltime) - }, error = function(e) { - message(paste("Encountered an error: ", e$message)) - }, warning = function(w) { - message(paste("Warning: ", w$message)) - }, finally = { - message("calculateEstimatedWallTimeFromOpts - function execution completed.") - }) - + # to calculate est walltime for a homology search job, the number of hits + # must be provided + validation_fail <- is.null(n_hits) && "homology_search" %in% advanced_opts + stopifnot(!validation_fail) + + # Get process weights + proc_weights <- writeProcessRuntime2YML() + + # sort process weights by names and convert to vec + proc_weights <- proc_weights[order(names(proc_weights))] |> unlist() + all_procs <- names(proc_weights) |> sort() + # get processes from advanced options and sort by names + procs_from_opts <- mapAdvOption2Process(advanced_opts) + procs_from_opts <- sort(procs_from_opts) + # binary encode: yes proc will run (1); else 0 + binary_proc_vec <- dplyr::if_else(all_procs %in% procs_from_opts, 1L, 0L) + # dot product of weights and procs to run; scaled by the number of inputs + est_walltime <- (n_inputs * (binary_proc_vec %*% proc_weights)) |> + as.numeric() + # calculate the additional processes to run for the homologous hits + if ("homology_search" %in% advanced_opts) { + opts2procs <- mapOption2Process() + # exclude the homology search processes for the homologous hits + procs2exclude_for_homologs <- opts2procs[["homology_search"]] + procs_homologs <- procs_from_opts[!(procs_from_opts + %in% procs2exclude_for_homologs)] + binary_proc_vec_homolog <- dplyr::if_else(all_procs + %in% procs_homologs, 1L, 0L) + # add the estimated walltime for processes run on the homologous hits + est_walltime <- est_walltime + + (n_hits * (binary_proc_vec_homolog + %*% proc_weights) |> as.numeric()) + } + if (verbose) { + msg <- stringr::str_glue( + "warnings from calculateEstimatedWallTimeFromOpts ():\n", + "\tn_inputs={n_inputs}\n", + "\tn_hits={ifelse(is.null(n_hits), 'null', n_hits)}\n", + "\test_walltime={est_walltime}\n\n" + ) + cat(file = stderr(), msg) + } + 
return(est_walltime) } @@ -418,25 +297,8 @@ assignJobQueue <- function( t_sec_estimate, t_cutoff = 21600 # 6 hours ) { - tryCatch({ - if (!is.numeric(t_sec_estimate) || length(t_sec_estimate) != 1) { - stop("Argument 't_sec_estimate' must be a single numeric value.") - } - - if (!is.numeric(t_cutoff) || length(t_cutoff) != 1 || t_cutoff < 0) { - stop("Argument 't_cutoff' must be a single non-negative numeric value.") - } - - queue <- ifelse(t_sec_estimate > t_cutoff, "long", "short") - return(queue) - }, error = function(e) { - message(paste("Encountered an error: ", e$message)) - }, warning = function(w) { - message(paste("Warning: ", w$message)) - }, finally = { - message("assignJobQueue function execution completed.") - }) - + queue <- ifelse(t_sec_estimate > t_cutoff, "long", "short") + return(queue) } #' Plot the estimated runtimes for different advanced options and number @@ -456,7 +318,6 @@ assignJobQueue <- function( #' dev/molevol_scripts/docs/estimate_walltimes.png", plot = p) #' @export plotEstimatedWallTimes <- function() { - tryCatch({ opts <- mapOption2Process() |> names() # get all possible submission permutations (powerset) get_powerset <- function(vec) { @@ -536,12 +397,4 @@ plotEstimatedWallTimes <- function() { y = "Estimated walltime (hours)" ) return(p) - }, error = function(e) { - message(paste("Encountered an error: ", e$message)) - }, warning = function(w) { - message(paste("Warning: ", w$message)) - }, finally = { - message("plotEstimatedWallTimes function execution completed.") - }) - } diff --git a/R/blastWrappers.R b/R/blastWrappers.R index 15484a1b..9b55f3ee 100755 --- a/R/blastWrappers.R +++ b/R/blastWrappers.R @@ -21,52 +21,24 @@ run_deltablast <- function(deltablast_path, db_search_path, db = "refseq", query, evalue = "1e-5", out, num_alignments, num_threads = 1) { - # Argument validation - if (!file.exists(deltablast_path)) { - stop("The DELTABLAST executable path is invalid: ", deltablast_path) - } - if (!dir.exists(db_search_path)) { - stop("The database search path is invalid: ", db_search_path) - } - if (!file.exists(query)) { - stop("The query file path is invalid: ", query) - } - if (!is.numeric(as.numeric(evalue)) || as.numeric(evalue) <= 0) { - stop("The evalue must be a positive number: ", evalue) - } - if (!is.numeric(num_alignments) || num_alignments <= 0) { - stop("The number of alignments must be a - positive integer: ", num_alignments) - } - if (!is.numeric(num_threads) || num_threads <= 0) { - stop("The number of threads must be a positive integer: ", num_threads) - } - start <- Sys.time() - tryCatch({ - system(paste0("export BLASTDB=/", db_search_path)) - system2( - command = deltablast_path, - args = c( - "-db", db, - "-query", query, - "-evalue", evalue, - "-out", out, - "-num_threads", num_threads, - "-num_alignments", num_alignments - # ,"-outfmt", outfmt - ) + system(paste0("export BLASTDB=/", db_search_path)) + + system2( + command = deltablast_path, + args = c( + "-db", db, + "-query", query, + "-evalue", evalue, + "-out", out, + "-num_threads", num_threads, + "-num_alignments", num_alignments + # ,"-outfmt", outfmt ) - print(Sys.time() - start) - }, error = function(e) { - message(paste("Error in run_deltablast: ", e)) - }, warning = function(w) { - message(paste("Warning in run_deltablast: ", w)) - }, finally = { - message("run_deltablast completed") - }) + ) + print(Sys.time() - start) } @@ -88,46 +60,19 @@ run_deltablast <- function(deltablast_path, db_search_path, run_rpsblast <- function(rpsblast_path, db_search_path, db = 
"refseq", query, evalue = "1e-5", out, num_threads = 1) { - # Argument validation - if (!file.exists(rpsblast_path)) { - stop("The RPSBLAST executable path is invalid: ", rpsblast_path) - } - if (!dir.exists(db_search_path)) { - stop("The database search path is invalid: ", db_search_path) - } - if (!file.exists(query)) { - stop("The query file path is invalid: ", query) - } - if (!is.numeric(as.numeric(evalue)) || as.numeric(evalue) <= 0) { - stop("The evalue must be a positive number: ", evalue) - } - if (!is.numeric(num_threads) || num_threads <= 0) { - stop("The number of threads must be a positive integer: ", num_threads) - } start <- Sys.time() + system(paste0("export BLASTDB=/", db_search_path)) - tryCatch({ - - system(paste0("export BLASTDB=/", db_search_path)) - - system2( - command = rpsblast_path, - args = c( - "-db", db, - "-query", query, - "-evalue", evalue, - "-out", out, - "-num_threads", num_threads - ) + system2( + command = rpsblast_path, + args = c( + "-db", db, + "-query", query, + "-evalue", evalue, + "-out", out, + "-num_threads", num_threads ) - print(Sys.time() - start) - }, error = function(e) { - message(paste("Error in run_rpsblast: ", e)) - }, warning = function(w) { - message(paste("Warning in run_rpsblast: ", w)) - }, finally = { - message("run_rpsblast completed") - }) - + ) + print(Sys.time() - start) } From 392775de92dfc33b198b41a5a2843f5313dd2e0d Mon Sep 17 00:00:00 2001 From: David Mayer Date: Sun, 13 Oct 2024 19:43:58 -0600 Subject: [PATCH 38/41] adjust .Rd title tags for renamed functions --- R/assign_job_queue.R | 27 +++++++++++++++++++++++ R/create_lineage_lookup.R | 3 +++ man/assignJobQueue.Rd | 2 +- man/calculateEstimatedWallTimeFromOpts.Rd | 3 +-- man/calculateProcessRuntime.Rd | 2 +- man/createLineageLookup.Rd | 2 +- man/getProcessRuntimeWeights.Rd | 2 +- man/mapAdvOption2Process.Rd | 2 +- man/mapOption2Process.Rd | 2 +- man/plotEstimatedWallTimes.Rd | 6 +++-- man/writeProcessRuntime2TSV.Rd | 2 +- man/writeProcessRuntime2YML.Rd | 6 +++-- 12 files changed, 46 insertions(+), 13 deletions(-) diff --git a/R/assign_job_queue.R b/R/assign_job_queue.R index 4791b4a1..20ba841f 100644 --- a/R/assign_job_queue.R +++ b/R/assign_job_queue.R @@ -6,6 +6,9 @@ # file.path(common_root, "molevol_scripts", "R", "assignJobQueue.R") common_root <- Sys.getenv("COMMON_SRC_ROOT") +#' mapOption2Process +#' +#' @description #' Construct list where names (MolEvolvR advanced options) point to processes #' #' @return list where names (MolEvolvR advanced options) point to processes @@ -22,6 +25,9 @@ mapOption2Process <- function() { return(opts2processes) } +#' mapAdvOption2Process +#' +#' @description #' Use MolEvolvR advanced options to get associated processes #' #' @param advanced_opts character vector of MolEvolvR advanced options @@ -44,6 +50,9 @@ mapAdvOption2Process <- function(advanced_opts) { return(procs) } +#' calculateProcessRuntime +#' +#' @description #' Scrape MolEvolvR logs and calculate median processes #' #' @param dir_job_results [chr] path to MolEvolvR job_results @@ -104,6 +113,9 @@ calculateProcessRuntime <- function(dir_job_results) { return(list_proc_medians) } +#' writeProcessRuntime2TSV +#' +#' @description #' Write a table of 2 columns: 1) process and 2) median seconds #' #' @param dir_job_results [chr] path to MolEvolvR job_results @@ -136,6 +148,9 @@ writeProcessRuntime2TSV <- function(dir_job_results, filepath) { return(df_proc_medians) } +#' writeProcessRuntime2YML +#' +#' @description #' Compute median process runtimes, then write a YAML list 
of the processes and #' their median runtimes in seconds to the path specified by 'filepath'. #' @@ -162,6 +177,9 @@ writeProcessRuntime2YML <- function(dir_job_results, filepath = NULL) { yaml::write_yaml(medians, filepath) } +#' getProcessRuntimeWeights +#' +#' @description #' Quickly get the runtime weights for MolEvolvR backend processes #' #' @param dir_job_results [chr] path to MolEvolvR job_results @@ -213,6 +231,9 @@ getProcessRuntimeWeights <- function(medians_yml_path = NULL) { return(proc_weights) } +#' calculateEstimatedWallTimeFromOpts +#' +#' @description #' Given MolEvolvR advanced options and number of inputs, #' calculate the total estimated walltime for the job #' @@ -279,6 +300,9 @@ calculateEstimatedWallTimeFromOpts <- function(advanced_opts, } +#' assignJobQueue +#' +#' @description #' Decision function to assign job queue #' #' @param t_sec_estimate estimated number of seconds a job will process @@ -301,6 +325,9 @@ assignJobQueue <- function( return(queue) } +#' plotEstimatedWallTimes +#' +#' @description #' Plot the estimated runtimes for different advanced options and number #' of inputs #' diff --git a/R/create_lineage_lookup.R b/R/create_lineage_lookup.R index 78e79048..2408c5e6 100644 --- a/R/create_lineage_lookup.R +++ b/R/create_lineage_lookup.R @@ -3,6 +3,9 @@ # library(biomartr) +#' createLineageLookup +#' +#' @description #' Create a look up table that goes from TaxID, to Lineage #' #' @author Samuel Chen diff --git a/man/assignJobQueue.Rd b/man/assignJobQueue.Rd index 3663ce56..de646a82 100644 --- a/man/assignJobQueue.Rd +++ b/man/assignJobQueue.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/assign_job_queue.R \name{assignJobQueue} \alias{assignJobQueue} -\title{Decision function to assign job queue} +\title{assignJobQueue} \usage{ assignJobQueue(t_sec_estimate, t_cutoff = 21600) } diff --git a/man/calculateEstimatedWallTimeFromOpts.Rd b/man/calculateEstimatedWallTimeFromOpts.Rd index c09cf6a6..d5361001 100644 --- a/man/calculateEstimatedWallTimeFromOpts.Rd +++ b/man/calculateEstimatedWallTimeFromOpts.Rd @@ -2,8 +2,7 @@ % Please edit documentation in R/assign_job_queue.R \name{calculateEstimatedWallTimeFromOpts} \alias{calculateEstimatedWallTimeFromOpts} -\title{Given MolEvolvR advanced options and number of inputs, -calculate the total estimated walltime for the job} +\title{calculateEstimatedWallTimeFromOpts} \usage{ calculateEstimatedWallTimeFromOpts( advanced_opts, diff --git a/man/calculateProcessRuntime.Rd b/man/calculateProcessRuntime.Rd index bb6dd1ed..579ea2b6 100644 --- a/man/calculateProcessRuntime.Rd +++ b/man/calculateProcessRuntime.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/assign_job_queue.R \name{calculateProcessRuntime} \alias{calculateProcessRuntime} -\title{Scrape MolEvolvR logs and calculate median processes} +\title{calculateProcessRuntime} \usage{ calculateProcessRuntime(dir_job_results) } diff --git a/man/createLineageLookup.Rd b/man/createLineageLookup.Rd index 5dbab978..132019ce 100644 --- a/man/createLineageLookup.Rd +++ b/man/createLineageLookup.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/create_lineage_lookup.R \name{createLineageLookup} \alias{createLineageLookup} -\title{Create a look up table that goes from TaxID, to Lineage} +\title{createLineageLookup} \usage{ createLineageLookup( lineage_file = here("data/rankedlineage.dmp"), diff --git a/man/getProcessRuntimeWeights.Rd b/man/getProcessRuntimeWeights.Rd index ff3c8e5d..de0e2ea6 100644 --- a/man/getProcessRuntimeWeights.Rd +++ 
b/man/getProcessRuntimeWeights.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/assign_job_queue.R \name{getProcessRuntimeWeights} \alias{getProcessRuntimeWeights} -\title{Quickly get the runtime weights for MolEvolvR backend processes} +\title{getProcessRuntimeWeights} \usage{ getProcessRuntimeWeights(medians_yml_path = NULL) } diff --git a/man/mapAdvOption2Process.Rd b/man/mapAdvOption2Process.Rd index 5bd9ee65..6a210a20 100644 --- a/man/mapAdvOption2Process.Rd +++ b/man/mapAdvOption2Process.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/assign_job_queue.R \name{mapAdvOption2Process} \alias{mapAdvOption2Process} -\title{Use MolEvolvR advanced options to get associated processes} +\title{mapAdvOption2Process} \usage{ mapAdvOption2Process(advanced_opts) } diff --git a/man/mapOption2Process.Rd b/man/mapOption2Process.Rd index ff6905c5..9645617b 100644 --- a/man/mapOption2Process.Rd +++ b/man/mapOption2Process.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/assign_job_queue.R \name{mapOption2Process} \alias{mapOption2Process} -\title{Construct list where names (MolEvolvR advanced options) point to processes} +\title{mapOption2Process} \usage{ mapOption2Process() } diff --git a/man/plotEstimatedWallTimes.Rd b/man/plotEstimatedWallTimes.Rd index 0d53cb32..36b0ecd5 100644 --- a/man/plotEstimatedWallTimes.Rd +++ b/man/plotEstimatedWallTimes.Rd @@ -2,8 +2,7 @@ % Please edit documentation in R/assign_job_queue.R \name{plotEstimatedWallTimes} \alias{plotEstimatedWallTimes} -\title{Plot the estimated runtimes for different advanced options and number -of inputs} +\title{plotEstimatedWallTimes} \usage{ plotEstimatedWallTimes() } @@ -16,5 +15,8 @@ ggplot2::ggsave(filename = "/data/molevolvr_transfer/molevolvr_ dev/molevol_scripts/docs/estimate_walltimes.png", plot = p) } \description{ +Plot the estimated runtimes for different advanced options and number +of inputs + this function was just for fun; very, very messy code } diff --git a/man/writeProcessRuntime2TSV.Rd b/man/writeProcessRuntime2TSV.Rd index 03cbbd68..0e045a5c 100644 --- a/man/writeProcessRuntime2TSV.Rd +++ b/man/writeProcessRuntime2TSV.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/assign_job_queue.R \name{writeProcessRuntime2TSV} \alias{writeProcessRuntime2TSV} -\title{Write a table of 2 columns: 1) process and 2) median seconds} +\title{writeProcessRuntime2TSV} \usage{ writeProcessRuntime2TSV(dir_job_results, filepath) } diff --git a/man/writeProcessRuntime2YML.Rd b/man/writeProcessRuntime2YML.Rd index b43f39ee..865f23f7 100644 --- a/man/writeProcessRuntime2YML.Rd +++ b/man/writeProcessRuntime2YML.Rd @@ -2,8 +2,7 @@ % Please edit documentation in R/assign_job_queue.R \name{writeProcessRuntime2YML} \alias{writeProcessRuntime2YML} -\title{Compute median process runtimes, then write a YAML list of the processes and -their median runtimes in seconds to the path specified by 'filepath'.} +\title{writeProcessRuntime2YML} \usage{ writeProcessRuntime2YML(dir_job_results, filepath = NULL) } @@ -14,6 +13,9 @@ writeProcessRuntime2YML(dir_job_results, filepath = NULL) uses ./molevol_scripts/log_data/job_proc_weights.yml} } \description{ +Compute median process runtimes, then write a YAML list of the processes and +their median runtimes in seconds to the path specified by 'filepath'. + The default value of filepath is the value of the env var MOLEVOLVR_PROC_WEIGHTS, which writeProcessRuntime2YML() also uses as its default read location. 
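[Review note, not part of the patch series] The roxygen blocks added in PATCH 38 document a small pipeline in R/assign_job_queue.R: map the selected advanced options to backend processes, estimate the walltime, then pick a queue. A minimal usage sketch follows, assuming the functions are exported as their .Rd files suggest; the advanced-option strings and the second argument to calculateEstimatedWallTimeFromOpts() (taken here to be the number of inputs, per its description) are placeholders, not values confirmed by the hunks above.

    # Illustrative sketch only; the option names and the number-of-inputs
    # argument are assumptions, signatures follow the roxygen/.Rd hunks in PATCH 38.
    library(MolEvolvR)

    advanced_opts <- c("domain_architecture", "homology_search")  # hypothetical option names

    # Backend processes implied by the chosen options
    procs <- mapAdvOption2Process(advanced_opts)

    # Estimated walltime in seconds for ~100 inputs, then queue assignment
    t_est <- calculateEstimatedWallTimeFromOpts(advanced_opts, 100)  # 2nd arg assumed: number of inputs
    queue <- assignJobQueue(t_sec_estimate = t_est, t_cutoff = 21600)
    queue
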
From df602dfd63cbab0d84dbcc8229e3da9c7646b9d5 Mon Sep 17 00:00:00 2001 From: David Mayer Date: Tue, 22 Oct 2024 13:52:56 -0600 Subject: [PATCH 39/41] https://github.com/JRaviLab/MolEvolvR/pull/95/files#r1805272251 - re-implement dropped check - fix .Rd --- R/assign_job_queue.R | 5 ++++- man/writeProcessRuntime2YML.Rd | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/R/assign_job_queue.R b/R/assign_job_queue.R index 20ba841f..69609417 100644 --- a/R/assign_job_queue.R +++ b/R/assign_job_queue.R @@ -155,7 +155,7 @@ writeProcessRuntime2TSV <- function(dir_job_results, filepath) { #' their median runtimes in seconds to the path specified by 'filepath'. #' #' The default value of filepath is the value of the env var -#' MOLEVOLVR_PROC_WEIGHTS, which writeProcessRuntime2YML() also uses as its default +#' MOLEVOLVR_PROC_WEIGHTS, which getProcessRuntimeWeights() also uses as its default #' read location. #' #' @param dir_job_results [chr] path to MolEvolvR job_results directory @@ -173,6 +173,9 @@ writeProcessRuntime2TSV <- function(dir_job_results, filepath) { #' } #' @export writeProcessRuntime2YML <- function(dir_job_results, filepath = NULL) { + if (is.null(filepath)) { + filepath <- file.path(common_root, "molevol_scripts", "log_data", "job_proc_weights.yml") + } medians <- calculateProcessRuntime(dir_job_results) yaml::write_yaml(medians, filepath) } diff --git a/man/writeProcessRuntime2YML.Rd b/man/writeProcessRuntime2YML.Rd index 865f23f7..5e0a05a4 100644 --- a/man/writeProcessRuntime2YML.Rd +++ b/man/writeProcessRuntime2YML.Rd @@ -17,7 +17,7 @@ Compute median process runtimes, then write a YAML list of the processes and their median runtimes in seconds to the path specified by 'filepath'. The default value of filepath is the value of the env var -MOLEVOLVR_PROC_WEIGHTS, which writeProcessRuntime2YML() also uses as its default +MOLEVOLVR_PROC_WEIGHTS, which getProcessRuntimeWeights() also uses as its default read location. } \examples{ From 1a0b66358eac637736a18868ae27e4049aa22628 Mon Sep 17 00:00:00 2001 From: David Mayer Date: Tue, 22 Oct 2024 14:43:47 -0600 Subject: [PATCH 40/41] https://github.com/JRaviLab/MolEvolvR/pull/95#discussion_r1805166466 - adjust roxygen skeleton readability --- R/acc2lin.R | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/R/acc2lin.R b/R/acc2lin.R index 61aae87c..7b6f570c 100644 --- a/R/acc2lin.R +++ b/R/acc2lin.R @@ -198,10 +198,8 @@ efetchIPG <- function(accnums, out_path, plan = "sequential", ...) { #' of an efetch run on the ipg database and #' #' @param accessions Character vector of protein accessions -#' @param ipg_file Filepath to the file -#' containing results of an efetch run on the -#' ipg database. The protein accession in -#' 'accessions' should be contained in this +#' @param ipg_file Filepath to the file containing results of an efetch run on the +#' ipg database. 
The protein accession in 'accessions' should be contained in this #' file #' @param assembly_path String of the path to the assembly_summary path #' This file can be generated using the \link[MolEvolvR]{downloadAssemblySummary} function From 13e70c75a197c02c395cbef2d7b3c5b991ea7649 Mon Sep 17 00:00:00 2001 From: David Mayer Date: Tue, 22 Oct 2024 15:02:39 -0600 Subject: [PATCH 41/41] formatting --- R/acc2lin.R | 8 ++------ man/efetchIPG.Rd | 3 +-- man/sinkReset.Rd | 1 - 3 files changed, 3 insertions(+), 9 deletions(-) diff --git a/R/acc2lin.R b/R/acc2lin.R index 7b6f570c..5f25afe2 100644 --- a/R/acc2lin.R +++ b/R/acc2lin.R @@ -10,7 +10,6 @@ #' Sink Reset #' #' @return No return, but run to close all outstanding `sink()`s -#' and handles any errors or warnings that occur during the process. #' #' @export #' @@ -87,8 +86,7 @@ addLineage <- function(df, acc_col = "AccNum", assembly_path, #' This file can be generated using the \link[MolEvolvR]{downloadAssemblySummary} function #' @param lineagelookup_path String of the path to the lineage lookup file #' (taxid to lineage mapping). This file can be generated using the -#' @param ipgout_path Path to write the results -#' of the efetch run of the accessions +#' @param ipgout_path Path to write the results of the efetch run of the accessions #' on the ipg database. If NULL, the file will not be written. Defaults to NULL #' @param plan A string specifying the parallelization strategy for the future #' package, such as `"sequential"` or `"multisession"`. @@ -122,9 +120,7 @@ acc2Lineage <- function(accessions, assembly_path, lineagelookup_path, ipgout_pa #' #' @author Samuel Chen, Janani Ravi #' -#' @description Perform efetch on the ipg database -#' and write the results to out_path -#' +#' @description Perform efetch on the ipg database and write the results to out_path #' @param accnums Character vector containing the accession numbers to query on #' the ipg database #' @param out_path Path to write the efetch results to diff --git a/man/efetchIPG.Rd b/man/efetchIPG.Rd index db63024f..047e2652 100644 --- a/man/efetchIPG.Rd +++ b/man/efetchIPG.Rd @@ -23,8 +23,7 @@ the ipg database} No return value. The function writes the fetched results to \code{out_path}. } \description{ -Perform efetch on the ipg database -and write the results to out_path +Perform efetch on the ipg database and write the results to out_path Perform efetch on the ipg database and write the results to out_path } diff --git a/man/sinkReset.Rd b/man/sinkReset.Rd index e3fc7ce4..0285c0b2 100644 --- a/man/sinkReset.Rd +++ b/man/sinkReset.Rd @@ -8,7 +8,6 @@ sinkReset() } \value{ No return, but run to close all outstanding \code{sink()}s -and handles any errors or warnings that occur during the process. } \description{ Sink Reset
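[Review note, not part of the patch series] PATCHes 40 and 41 tidy the roxygen for the lineage helpers in R/acc2lin.R. A minimal usage sketch of acc2Lineage() follows, using only the argument names visible in the hunks above; the accession numbers and file paths are placeholders, and the assembly summary and lineage lookup files are assumed to have been built beforehand (e.g. with downloadAssemblySummary() and createLineageLookup()).

    # Illustrative sketch only; accessions and paths are placeholders.
    library(MolEvolvR)

    lins <- acc2Lineage(
        accessions         = c("ABC12345.1", "XYZ67890.1"),  # placeholder protein accessions
        assembly_path      = "data/assembly_summary.txt",    # e.g. from downloadAssemblySummary()
        lineagelookup_path = "data/lineage_lookup.tsv",      # taxid-to-lineage mapping
        ipgout_path        = NULL,                           # do not keep the raw efetch output
        plan               = "sequential"
    )

    # If a run is interrupted partway, sinkReset() (documented above) closes
    # any sink() connections left open.
    sinkReset()

addLineage() (also touched above, with its df and acc_col = "AccNum" arguments) covers the case where the accessions already sit in a column of an existing data.frame.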