enabled storeDir for dsl2 #1003

Merged · 14 commits · Apr 24, 2024
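For context: in Nextflow, the storeDir directive writes a process's outputs to a permanent directory and skips re-execution when those outputs already exist there. A minimal sketch of the directive in a DSL2 process follows; the process name, paths, and command are illustrative only, not taken from this PR.

process ExampleStoreDir {
    // outputs are kept under params.outDir and reused on re-runs instead of being recomputed
    storeDir "${params.outDir}/flagstat/${idSample}"

    input:
    tuple val(idSample), path(bam)

    output:
    tuple val(idSample), path("${idSample}.flagstat.txt")

    script:
    """
    samtools flagstat ${bam} > ${idSample}.flagstat.txt
    """
}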
2 changes: 1 addition & 1 deletion .travis.yml
@@ -14,7 +14,7 @@ before_install:
  - chmod 777 nextflow
  # to change the test-data for travis, please download using the following command, extract, make changes, tarball again with gzip, and upload to google drive.
  # you will have to change the link below as well. Click to share the link, making it so anyone with the link can access, then extract the id in the link and put it here after "id="
- - wget -O test-data.tar.gz --no-check-certificate 'https://docs.google.com/uc?export=download&confirm=no_antivirus&id=13zUVw4BZ0_5QAW7zmdCsZ_CZDHbNxyMV'
+ - wget -O test-data.tar.gz --no-check-certificate 'https://drive.usercontent.google.com/download?export=download&confirm=no_antivirus&id=13zUVw4BZ0_5QAW7zmdCsZ_CZDHbNxyMV'
  - tar -xzvf test-data.tar.gz

script:
1 change: 0 additions & 1 deletion modules/process/Alignment/RunBQSR.nf
@@ -9,7 +9,6 @@

  output:
  tuple val(idSample), val(target), path("${idSample}.bam"), path("${idSample}.bam.bai"), emit: bamsBQSR
- tuple val(idSample), val(target), val("${params.outDir}/bams/${idSample}/${idSample}.bam"), val("${params.outDir}/bams/${idSample}/${idSample}.bam.bai"), emit: bamResults
  path("file-size.txt"), emit: bamSize

script:
11 changes: 5 additions & 6 deletions modules/process/Facets/DoFacets.nf
@@ -9,15 +9,16 @@ process DoFacets {
  tuple val(idTumor), val(idNormal), val(target), path(bamTumor), path(baiTumor), path(bamNormal), path(baiNormal)
  file(facetsVcf)
  path(custom_scripts)
+ val(outputDir)

  output:
  path("${outfile}"), emit: snpPileupOutput
  path("${outputDir}/*"), emit: FacetsOutput
- tuple val("placeHolder"), val(idTumor), val(idNormal), path("*/*_purity.seg"), path("*/*_hisens.seg"), path("*_OUT.txt"), path("*/*.arm_level.txt"), path("*/*.gene_level.txt"), emit: facets4Aggregate
+ tuple val(idTumor), val(idNormal), path("*/*_purity.seg"), path("*/*_hisens.seg"), path("*_OUT.txt"), path("*/*.arm_level.txt"), path("*/*.gene_level.txt"), emit: facets4Aggregate
  tuple val(idTumor), val(idNormal), val(target), path("${outputDir}/*purity.out"), emit: facetsPurity
- tuple val(idTumor), val(idNormal), val(target), path("${outputDir}/*hisens.Rdata"), val("${outputDir}"), emit: facetsForMafAnno
- tuple val(idTumor), val(idNormal), val(target), path("${outputDir}/*.{Rdata,png,out,seg,txt}"), path("${idTumor}__${idNormal}.snp_pileup.gz"), val("${outputDir}"), emit: Facets4FacetsPreview
- tuple val("placeHolder"), val(idTumor), val(idNormal), path("*/*.*_level.txt"), emit: FacetsArmGeneOutput
+ tuple val(idTumor), val(idNormal), val(target), path("${outputDir}/*hisens.Rdata"), val(outputDir), emit: facetsForMafAnno
+ tuple val(idTumor), val(idNormal), val(target), path("${outputDir}/*.{Rdata,png,out,seg,txt}"), path("${idTumor}__${idNormal}.snp_pileup.gz"), val(outputDir), emit: Facets4FacetsPreview
+ tuple val(idTumor), val(idNormal), path("*/*.*_level.txt"), emit: FacetsArmGeneOutput
  tuple val(idTumor), val(idNormal), val(target), path("*/*.qc.txt"), emit: FacetsQC4MetaDataParser
  tuple val(idTumor), val(idNormal), path("*_OUT.txt"), emit: FacetsRunSummary
  tuple val(idTumor), val(idNormal), val(target), path("${tag}_hisens.facets.copynumber.csv"), emit: FacetsHisensCNV4HrDetect
@@ -30,8 +31,6 @@ process DoFacets {
  script:
  tag = outputFacetsSubdirectory = "${idTumor}__${idNormal}"
  outfile = tag + ".snp_pileup.gz"
- outputDir = "facets${params.facets.R_lib}c${params.facets.cval}pc${params.facets.purity_cval}"

"""
touch .Rprofile

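The DoFacets change above moves the outputDir computation out of the process's script block and into the calling workflow (see facets_wf.nf below), passing it in as a value input so the output: declarations can reference a directory that is known before the task runs. A minimal sketch of that pattern, with hypothetical names:

process Demo {
    input:
    val(outputDir)

    output:
    path("${outputDir}/*"), emit: demoOut

    script:
    """
    mkdir -p ${outputDir}
    echo done > ${outputDir}/result.txt
    """
}

workflow {
    Demo("facets_demo")    // the directory name is resolved once, in the workflow
}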
2 changes: 1 addition & 1 deletion modules/process/GermSNV/GermlineCombineHaplotypecallerVcf.nf
@@ -8,7 +8,7 @@ process GermlineCombineHaplotypecallerVcf {
tuple file(genomeFile), file(genomeIndex), file(genomeDict)

  output:
- tuple val("placeHolder"), val(idNormal), val(target), path("${outfile}"), path("${outfile}.tbi"), emit: haplotypecallerCombinedVcfOutput
+ tuple val(idNormal), val(target), path("${outfile}"), path("${outfile}.tbi"), emit: haplotypecallerCombinedVcfOutput

script:
idNormal = id.toString().split("@")[0]
2 changes: 1 addition & 1 deletion modules/process/GermSNV/GermlineFacetsAnnotation.nf
@@ -8,7 +8,7 @@ process GermlineFacetsAnnotation {

  output:
  path("${outputPrefix}.final.maf"), emit: mafFileOutputGermline
- tuple val("placeHolder"), val(idTumor), val(idNormal), file("${outputPrefix}.final.maf"), emit: mafFile4AggregateGermline
+ tuple val(idTumor), val(idNormal), file("${outputPrefix}.final.maf"), emit: mafFile4AggregateGermline

script:
outputPrefix = "${idTumor}__${idNormal}.germline"
2 changes: 1 addition & 1 deletion modules/process/GermSNV/GermlineRunStrelka2.nf
@@ -8,7 +8,7 @@ process GermlineRunStrelka2 {
tuple path(genomeFile), path(genomeIndex), path(genomeDict)

  output:
- tuple val("placeHolder"), val(idNormal), val(target), path("${idNormal}.strelka2.vcf.gz"), path("${idNormal}.strelka2.vcf.gz.tbi"), emit: strelkaOutputGermline
+ tuple val(idNormal), val(target), path("${idNormal}.strelka2.vcf.gz"), path("${idNormal}.strelka2.vcf.gz.tbi"), emit: strelkaOutputGermline

script:
options = ""
4 changes: 2 additions & 2 deletions modules/process/HRDetect/HRDetect.nf
@@ -9,7 +9,7 @@ process HRDetect {
path(HRDetect_script)

  output:
- tuple val("placeHolder"), val(idTumor), val(idNormal), path("${outputPrefix}.hrdetect.tsv")
+ tuple val(idTumor), val(idNormal), path("${outputPrefix}.hrdetect.tsv")

when: params.assayType == "genome"

@@ -22,4 +22,4 @@
Rscript ${HRDetect_script} ${outputPrefix}.tsv ${genome_version} ${task.cpus}
"""

-}
\ No newline at end of file
+}
2 changes: 1 addition & 1 deletion modules/process/LoH/RunLOHHLA.nf
@@ -9,7 +9,7 @@ process RunLOHHLA {

  output:
  tuple path("*.DNA.HLAlossPrediction_CI.txt"), path("*DNA.IntegerCPN_CI.txt"), path("*.pdf"), path("*.RData"), optional: true, emit: lohhlaOutput
- tuple val("placeHolder"), val(idTumor), val(idNormal), file("*.DNA.HLAlossPrediction_CI.txt"), file("*DNA.IntegerCPN_CI.txt"), emit: lohhla4Aggregate
+ tuple val(placeHolder), val(idTumor), val(idNormal), file("*.DNA.HLAlossPrediction_CI.txt"), file("*DNA.IntegerCPN_CI.txt"), emit: lohhla4Aggregate

script:
outputPrefix = "${idTumor}__${idNormal}"
2 changes: 1 addition & 1 deletion modules/process/LoH/RunPolysolver.nf
@@ -5,7 +5,7 @@ process RunPolysolver {
tuple val(idNormal), val(target), path(bamNormal), path(baiNormal)

  output:
- tuple val("placeHolder"), val(idNormal), val(target), path("${outputPrefix}.hla.txt"), emit: hlaOutput
+ tuple val(idNormal), val(target), path("${outputPrefix}.hla.txt"), emit: hlaOutput

script:
outputPrefix = "${idNormal}"
2 changes: 1 addition & 1 deletion modules/process/MetaParse/MetaDataParser.nf
@@ -8,7 +8,7 @@ process MetaDataParser {

  output:
  path("*.sample_data.txt"), emit: MetaDataOutput
- tuple val("placeHolder"), val(idTumor), val(idNormal), path("*.sample_data.txt"), emit: MetaData4Aggregate
+ tuple val(placeHolder), val(idTumor), val(idNormal), path("*.sample_data.txt"), emit: MetaData4Aggregate

script:
codingRegionsBed = codingBed
4 changes: 2 additions & 2 deletions modules/process/QC/QcConpair.nf
@@ -8,8 +8,8 @@ process QcConpair {
tuple path(genomeFile), path(genomeIndex), path(genomeDict)

  output:
- tuple val("placeHolder"), val(idTumor), val(idNormal), path("${outPrefix}.{concordance,contamination}.txt"), emit: conpairOutput
- tuple val("placeHolder"), val(idTumor), val(idNormal), path("${outPrefix}.concordance.txt"), path("${outPrefix}.contamination.txt"), emit: conpair4Aggregate
+ tuple val(idTumor), val(idNormal), path("${outPrefix}.{concordance,contamination}.txt"), emit: conpairOutput
+ tuple val(idTumor), val(idNormal), path("${outPrefix}.concordance.txt"), path("${outPrefix}.contamination.txt"), emit: conpair4Aggregate

script:
outPrefix = "${idTumor}__${idNormal}"
4 changes: 2 additions & 2 deletions modules/process/QC/QcConpairAll.nf
@@ -6,8 +6,8 @@ process QcConpairAll {
tuple path(genomeFile), path(genomeIndex), path(genomeDict)

  output:
- tuple val("placeHolder"), val(idTumor), val(idNormal), path("${outPrefix}.{concordance,contamination}.txt"), emit: conpairAllOutput
- tuple val("placeHolder"), val(idTumor), val(idNormal), path("${outPrefix}.concordance.txt"), path("${outPrefix}.contamination.txt"), emit: conpairAll4Aggregate
+ tuple val(idTumor), val(idNormal), path("${outPrefix}.{concordance,contamination}.txt"), emit: conpairAllOutput
+ tuple val(idTumor), val(idNormal), path("${outPrefix}.concordance.txt"), path("${outPrefix}.contamination.txt"), emit: conpairAll4Aggregate

script:
outPrefix = "${idTumor}__${idNormal}"
2 changes: 1 addition & 1 deletion modules/process/SNV/RunNeoantigen.nf
@@ -8,7 +8,7 @@ process RunNeoantigen {
tuple path(neoantigenCDNA), path(neoantigenCDS)

  output:
- tuple val("placeHolder"), val(idTumor), val(idNormal), path("${idTumor}__${idNormal}.all_neoantigen_predictions.txt"), emit: NetMhcStats4Aggregate
+ tuple val(placeHolder), val(idTumor), val(idNormal), path("${idTumor}__${idNormal}.all_neoantigen_predictions.txt"), emit: NetMhcStats4Aggregate
path("${idTumor}__${idNormal}.all_neoantigen_predictions.txt"), emit: NetMhcStatsOutput
tuple val(idTumor), val(idNormal), val(target), path("${outputDir}/${outputPrefix}.neoantigens.maf"), emit: mafFileForMafAnno

2 changes: 1 addition & 1 deletion modules/process/SNV/SomaticFacetsAnnotation.nf
@@ -7,7 +7,7 @@ process SomaticFacetsAnnotation {
tuple val(idTumor), val(idNormal), val(target), path(hisens_rdata), val(facetsPath), path(maf)

  output:
- tuple val("placeHolder"), val(idTumor), val(idNormal), path("${outputPrefix}.somatic.final.maf"), emit: finalMaf4Aggregate
+ tuple val(idTumor), val(idNormal), path("${outputPrefix}.somatic.final.maf"), emit: finalMaf4Aggregate
path("file-size.txt"), emit: mafSize
path("${outputPrefix}.somatic.final.maf"), emit: finalMafOutput
tuple val(idTumor), val(idNormal), val(target), path("${outputPrefix}.somatic.final.maf"), emit: maf4MetaDataParser
2 changes: 1 addition & 1 deletion modules/process/SV/SomaticAnnotateSVBedpe.nf
@@ -20,7 +20,7 @@ process SomaticAnnotateSVBedpe {
  output:
  tuple val(idTumor), val(idNormal), val(target), path("${outputPrefix}.unfiltered.bedpe"), emit: SVAnnotBedpe
  tuple val(idTumor), val(idNormal), val(target), path("${outputPrefix}.final.bedpe"), emit: SVAnnotBedpePass
- tuple val("placeholder"), val(idTumor), val(idNormal), path("${outputPrefix}.final.bedpe"), emit: SVAnnotBedpe4Aggregate
+ tuple val(idTumor), val(idNormal), path("${outputPrefix}.final.bedpe"), emit: SVAnnotBedpe4Aggregate

script:
outputPrefix = "${idTumor}__${idNormal}"
2 changes: 1 addition & 1 deletion modules/process/SV/SomaticRunClusterSV.nf
@@ -11,7 +11,7 @@ process SomaticRunClusterSV {
path("${outputPrefix}.sv_clusters_and_footprints.tsv"),
path("${outputPrefix}.sv_distance_pvals"),
path("${clusteredBedpe}"), emit: clusterSvOutput
tuple val("placeholder"), val(idTumor), val(idNormal),
tuple val(idTumor), val(idNormal),
path("${clusteredBedpe}"), emit: Bedpe4Aggregate

when: ["GRCh37","GRCh38","smallGRCh37"].contains(params.genome)
2 changes: 1 addition & 1 deletion modules/process/SVclone/SomaticRunSVclone.nf
@@ -17,7 +17,7 @@ output:
path("${outputPrefix}"), emit: SVcloneOutput
tuple val(idTumor), val(idNormal), val(target),
path("svclone/*"), emit: SVclonePublish
tuple val("placeHolder"), val(idTumor), val(idNormal),
tuple val(idTumor), val(idNormal),
path("svclone/svs/*cluster_certainty.txt"),
path("svclone/snvs/*cluster_certainty.txt"), emit: SVclone4Aggregate

53 changes: 27 additions & 26 deletions modules/subworkflow/alignment_wf.nf
@@ -134,28 +134,27 @@ workflow alignment_wf

// Check for FASTQ files which might have different path but contains the same reads, based only on the name of the first read.
def allReadIds = [:]
-   AlignReads.out.sortedBam.map { idSample, target, bam, fileID, lane, readIdFile -> def readId = '@' + readIdFile.getSimpleName().replaceAll('@', ':')
-     // Use the first line of the fastq file (the name of the first read) as unique identifier to check across all the samples if there is any two fastq files contains the same read name, if so, we consider there are some human error of mixing up the same reads into different fastq files
-     if ( !params.watch ) {
-       if (!TempoUtils.checkDuplicates(allReadIds, readId, idSample + "\t" + bam, "the following samples, since they contain the same read: \n${ readId }")) {exit 1}
+   AlignReads.out.sortedBam
+     .groupTuple(by:[3])
+     .map { idSample, target, bam, fileID, lane, readIdFile ->
+       def idSample_first = idSample instanceof Collection ? idSample.first() : idSample
+       def target_first = target instanceof Collection ? target.first() : target
+       if ( !params.watch ){
+         for (i in readIdFile.flatten().unique()){
+           def readId = "@" + i.getSimpleName().replaceAll("@", ":")
+           if(!TempoUtils.checkDuplicates(allReadIds, readId, idSample_first + "\t" + fileID, "the following samples, since they contain the same read: \n${readId}")){exit 1}
+         }
+       }
+       [idSample_first, target_first, bam.flatten().unique()]
+     }
-     [idSample, target, bam, fileID, lane]
-   }
-   .groupTuple(by: [3])
-   .map { item ->
-     def idSample = item[0] instanceof Collection ? item[0].first() : item[0]
-     def target = item[1] instanceof Collection ? item[1].first() : item[1]
-     def bams = item[2]
-     [idSample, target, bams]
-   }
-   .groupTuple(by: [0])
-   .map { item ->
-     def idSample = item[0]
-     def target = item[1] instanceof Collection ? item[1].first() : item[1]
-     def bams = item[2].flatten()
-     [idSample, bams, target]
-   }
-   .set { groupedBam }
+     .groupTuple(by: [0])
+     .map{ item ->
+       def idSample = item[0]
+       def target = item[1] instanceof Collection ? item[1].first() : item[1]
+       def bams = item[2].flatten().unique()
+       [idSample, bams, target]
+     }
+     .set { groupedBam }

MergeBamsAndMarkDuplicates(groupedBam)
RunBQSR(MergeBamsAndMarkDuplicates.out.mdBams,
@@ -170,13 +169,16 @@ workflow alignment_wf
]))


-   File file = new File(params.outname)
-   file.newWriter().withWriter { w ->
+   File file_bammapping = new File(params.outname)
+   file_bammapping.newWriter().withWriter { w ->
      w << "SAMPLE\tTARGET\tBAM\tBAI\n"
    }

-   RunBQSR.out.bamResults.subscribe { Object obj ->
-     file.withWriterAppend { out ->
+   RunBQSR.out.bamsBQSR
+     .map{ idSample, target, bam, bai ->
+       [ idSample, target, "${file(params.outDir).toString()}/bams/${idSample}/${idSample}.bam", "${file(params.outDir).toString()}/bams/${idSample}/${idSample}.bam.bai" ]
+     }.subscribe { Object obj ->
+     file_bammapping.withWriterAppend { out ->
out.println "${obj[0]}\t${obj[1]}\t${obj[2]}\t${obj[3]}"
}
}
@@ -191,7 +193,6 @@ workflow alignment_wf

  emit:
  RunBQSR_bamsBQSR = RunBQSR.out.bamsBQSR
- RunBQSR_bamResults = RunBQSR.out.bamResults
RunBQSR_bamSize = RunBQSR.out.bamSize
fastPJson = fastPJson
}
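The reworked channel logic above groups per-lane BAMs into one tuple per sample before merging, collapsing the duplicated sample and target fields that groupTuple produces. A toy sketch of that groupTuple/map pattern, with made-up sample names:

// one tuple per lane; the grouping key is the sample id at index 0
Channel.of(
        ['sampleA', 'exome', 'A_lane1.bam'],
        ['sampleA', 'exome', 'A_lane2.bam'],
        ['sampleB', 'exome', 'B_lane1.bam'])
    .groupTuple(by: [0])
    .map{ idSample, target, bams ->
        // after grouping, target is a per-lane list; collapse it to a single value
        def target_first = target instanceof Collection ? target.first() : target
        [idSample, bams.flatten().unique(), target_first]
    }
    .view()
// prints, e.g., [sampleA, [A_lane1.bam, A_lane2.bam], exome]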
3 changes: 2 additions & 1 deletion modules/subworkflow/clonality_wf.nf
@@ -19,7 +19,8 @@ workflow clonality_wf {
    SVcloneInput,
    workflow.projectDir + "/containers/svclone/svclone_wrapper.py"
  )
+ svclone4Aggregate = SomaticRunSVclone.out.SVclone4Aggregate.map{ ["placeHolder"] + it }

  emit:
- svclone4Aggregate = SomaticRunSVclone.out.SVclone4Aggregate
+ svclone4Aggregate = svclone4Aggregate
}
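This is the recurring pattern in the workflow-level changes: the "placeHolder" key is no longer baked into process outputs, and each workflow prepends it only where a downstream aggregation step still expects it. A toy illustration of that map, with made-up values:

Channel.of(['tumorA', 'normalA', 'clusters.txt'])
    .map{ ["placeHolder"] + it }
    .view()
// prints [placeHolder, tumorA, normalA, clusters.txt]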
18 changes: 15 additions & 3 deletions modules/subworkflow/facets_wf.nf
@@ -9,11 +9,13 @@ workflow facets_wf
  main:
  referenceMap = params.referenceMap
  targetsMap = params.targetsMap
+ outputDir = "facets${params.facets.R_lib}c${params.facets.cval}pc${params.facets.purity_cval}"

  DoFacets(
    bamFiles,
    referenceMap.facetsVcf,
-   workflow.projectDir + "/containers/facets-suite-preview-htstools"
+   workflow.projectDir + "/containers/facets-suite-preview-htstools",
+   outputDir
  )

DoFacetsPreviewQC(DoFacets.out.Facets4FacetsPreview)
@@ -24,14 +26,24 @@ workflow facets_wf
["placeholder",idTumor, idNormal, summaryFiles, qcFiles]
}.set{ FacetsQC4Aggregate }

+ DoFacets.out.facets4Aggregate
+   .map{
+     ["placeHolder"] + it
+   }.set{facets4Aggregate}
+
+ DoFacets.out.FacetsArmGeneOutput
+   .map{
+     ["placeHolder"] + it
+   }.set{FacetsArmGeneOutput}

  emit:
  snpPileupOutput = DoFacets.out.snpPileupOutput
  FacetsOutput = DoFacets.out.FacetsOutput
- facets4Aggregate = DoFacets.out.facets4Aggregate
+ facets4Aggregate = facets4Aggregate
  facetsPurity = DoFacets.out.facetsPurity
  facetsForMafAnno = DoFacets.out.facetsForMafAnno
  Facets4FacetsPreview = DoFacets.out.Facets4FacetsPreview
- FacetsArmGeneOutput = DoFacets.out.FacetsArmGeneOutput
+ FacetsArmGeneOutput = FacetsArmGeneOutput
  FacetsQC4MetaDataParser = DoFacets.out.FacetsQC4MetaDataParser
  FacetsRunSummary = DoFacets.out.FacetsRunSummary
  FacetsPreviewOut = DoFacetsPreviewQC.out.FacetsPreviewOut
10 changes: 8 additions & 2 deletions modules/subworkflow/germlineSNV_wf.nf
@@ -54,7 +54,12 @@ workflow germlineSNV_wf
Channel.value([referenceMap.genomeFile, referenceMap.genomeIndex, referenceMap.genomeDict]))

  // Join HaploTypeCaller and Strelka outputs, bcftools.
- GermlineCombineHaplotypecallerVcf.out.haplotypecallerCombinedVcfOutput.combine(GermlineRunStrelka2.out.strelkaOutputGermline, by: [0,1,2])
+ GermlineCombineHaplotypecallerVcf.out.haplotypecallerCombinedVcfOutput
+   .map{ ["placeHolder"] + it }
+   .combine(
+     GermlineRunStrelka2.out.strelkaOutputGermline.map{ ["placeHolder"] + it },
+     by: [0,1,2]
+   )
.combine(bamsTumor, by: [1,2])
.set{ mergedChannelVcfCombine }

@@ -71,7 +76,8 @@ workflow germlineSNV_wf
.set{ facetsMafFileGermline }

  GermlineFacetsAnnotation(facetsMafFileGermline)
+ snv4AggregateGermline = GermlineFacetsAnnotation.out.mafFile4AggregateGermline.map{ ["placeHolder"] + it }

  emit:
- snv4AggregateGermline = GermlineFacetsAnnotation.out.mafFile4AggregateGermline
+ snv4AggregateGermline = snv4AggregateGermline
}
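Because both channels get the placeholder prepended before the join, combine(by: [0,1,2]) still matches tuples on the same first three elements as before. A toy sketch of the operator, with made-up values:

def hc      = Channel.of(['placeHolder', 'normalA', 'exome', 'hc.vcf.gz'])
def strelka = Channel.of(['placeHolder', 'normalA', 'exome', 'strelka.vcf.gz'])
hc.combine(strelka, by: [0, 1, 2]).view()
// prints [placeHolder, normalA, exome, hc.vcf.gz, strelka.vcf.gz]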
4 changes: 3 additions & 1 deletion modules/subworkflow/hrdetect_wf.nf
@@ -16,7 +16,9 @@ workflow hrdetect_wf {
    workflow.projectDir + "/containers/signaturetoolslib/HRDetect_wrapper.R"
  )

+ HRDetectOut = HRDetect.out.map{ ["placeHolder"] + it }

  emit:
- HRDetect = HRDetect.out
+ HRDetect = HRDetectOut

}
5 changes: 3 additions & 2 deletions modules/subworkflow/loh_wf.nf
@@ -13,15 +13,16 @@ workflow loh_wf
targetsMap = params.targetsMap

  RunPolysolver(bams)
+ hlaOutput = RunPolysolver.out.hlaOutput.map{ ["placeHolder"] + it }

  bamFiles.combine(facetsPurity, by: [0,1,2])
-   .combine(RunPolysolver.out.hlaOutput, by: [1,2])
+   .combine(hlaOutput, by: [1,2])
    .set{ mergedChannelLOHHLA }

  RunLOHHLA(mergedChannelLOHHLA,
    Channel.value([referenceMap.hlaFasta, referenceMap.hlaDat]))

  emit:
- hlaOutput = RunPolysolver.out.hlaOutput
+ hlaOutput = hlaOutput
lohhla4Aggregate = RunLOHHLA.out.lohhla4Aggregate
}
6 changes: 5 additions & 1 deletion modules/subworkflow/samplePairingQC_wf.nf
@@ -1,5 +1,6 @@
  include { QcPileup } from '../process/QC/QcPileup'
  include { QcConpair } from '../process/QC/QcConpair'
+ include { QcConpairAll } from '../process/QC/QcConpairAll'

workflow samplePairingQC_wf
{
@@ -49,16 +50,19 @@ workflow samplePairingQC_wf
pileupT.combine(pileupN, by: [0, 1]).unique().set{ pileupConpair }

  QcConpair(pileupConpair, Channel.value([referenceMap.genomeFile, referenceMap.genomeIndex, referenceMap.genomeDict]))
+ conpair4Aggregate = QcConpair.out.conpair4Aggregate.map{ ["placeHolder"] + it }
+ conpairOutput = QcConpair.out.conpairOutput.map{ ["placeHolder"] + it }

  if(runConpairAll){
    pileupT.combine(pileupN).unique().set{ pileupConpairAll }

    QcConpairAll(pileupConpairAll,
      Channel.value([referenceMap.genomeFile, referenceMap.genomeIndex, referenceMap.genomeDict]))
+   conpairAll4Aggregate = QcConpairAll.out.conpairAll4Aggregate.map{ ["placeHolder"] + it }
  }

  emit:
  // -- Run based on QcConpairAll channels or the single QcConpair channels
- conpair4Aggregate = (!runConpairAll ? QcConpair.out.conpair4Aggregate : QcConpairAll.out.conpairAll4Aggregate)
+ conpair4Aggregate = (!runConpairAll ? conpair4Aggregate : conpairAll4Aggregate)
conpairOutput = QcConpair.out.conpairOutput
}