From 61726edb9293fe529e6efbe5bb6f1cc953bb3c4e Mon Sep 17 00:00:00 2001 From: Peter Amstutz Date: Tue, 21 Apr 2020 16:20:10 -0400 Subject: Workaround CWL limit by chunking file list Arvados-DCO-1.1-Signed-off-by: Peter Amstutz --- workflows/pangenome-generate/merge-metadata.cwl | 46 +++++++++++++++++++++---- workflows/pangenome-generate/merge-metadata.py | 33 ++++++++++++------ workflows/pangenome-generate/relabel-seqs.cwl | 12 ++++--- workflows/pangenome-generate/relabel-seqs.py | 18 +++++----- 4 files changed, 79 insertions(+), 30 deletions(-) (limited to 'workflows/pangenome-generate') diff --git a/workflows/pangenome-generate/merge-metadata.cwl b/workflows/pangenome-generate/merge-metadata.cwl index fcefe32..4d9c808 100644 --- a/workflows/pangenome-generate/merge-metadata.cwl +++ b/workflows/pangenome-generate/merge-metadata.cwl @@ -5,16 +5,48 @@ hints: dockerPull: commonworkflowlanguage/cwltool_module inputs: metadata: File[] - metadataSchema: File subjects: string[] - dups: File? - originalLabels: File + metadataSchema: + type: File + inputBinding: {position: 2} + originalLabels: + type: File + inputBinding: {position: 3} + dups: + type: File? + inputBinding: {position: 4} + script: + type: File + inputBinding: {position: 1} + default: {class: File, location: merge-metadata.py} outputs: merged: stdout stdout: mergedmetadata.ttl requirements: + InlineJavascriptRequirement: {} InitialWorkDirRequirement: - listing: - - entry: {$include: merge-metadata.py} - entryname: merge-metadata.py -baseCommand: [python3, merge-metadata.py] + listing: | + ${ + var i = 0; + var b = 1; + var out = []; + for (; i < inputs.metadata.length; i++) { + var block = []; + var sub = []; + for (; i < (b*150) && i < inputs.metadata.length; i++) { + block.push(inputs.metadata[i]); + sub.push(inputs.subjects[i]); + } + out.push({ + entryname: "block"+b, + entry: JSON.stringify(block) + }); + out.push({ + entryname: "subs"+b, + entry: JSON.stringify(sub) + }); + b++; + } + return out; + } +baseCommand: python diff --git a/workflows/pangenome-generate/merge-metadata.py b/workflows/pangenome-generate/merge-metadata.py index bfec781..65d08a6 100644 --- a/workflows/pangenome-generate/merge-metadata.py +++ b/workflows/pangenome-generate/merge-metadata.py @@ -2,12 +2,27 @@ import re import schema_salad.schema import schema_salad.jsonld_context import json +import sys +import os +import logging + +metadataSchema = sys.argv[1] +originalLabels = sys.argv[2] +dups = None +if len(sys.argv) == 4: + dups = sys.argv[3] + +def readitems(stem): + items = [] + b = 1 + while os.path.exists("%s%i" % (stem, b)): + with open("%s%i" % (stem, b)) as f: + items.extend(json.load(f)) + b += 1 + return items -metadataSchema = '$(inputs.metadataSchema.path)' -metadata = $(inputs.metadata) -subjects = $(inputs.subjects) -dups = json.loads('''$(inputs.dups)''') -originalLabels = $(inputs.originalLabels) +metadata = readitems("block") +subjects = readitems("subs") (document_loader, avsc_names, @@ -20,17 +35,15 @@ for i, m in enumerate(metadata): g = schema_salad.jsonld_context.makerdf(subjects[i], doc, document_loader.ctx) print(g.serialize(format="ntriples").decode("utf-8")) -import logging - if dups: - sameseqs = open(dups["path"], "rt") + sameseqs = open(dups, "rt") for d in sameseqs: logging.warn(d) - g = re.match(r"\\d+\\t(.*)", d) + g = re.match(r"\d+\t(.*)", d) logging.warn("%s", g.group(1)) sp = g.group(1).split(",") for n in sp[1:]: print("<%s> <%s> ." 
% (n.strip(), sp[0].strip())) -orig = open(originalLabels["path"], "rt") +orig = open(originalLabels, "rt") print(orig.read()) diff --git a/workflows/pangenome-generate/relabel-seqs.cwl b/workflows/pangenome-generate/relabel-seqs.cwl index 01196f6..c1f17a4 100644 --- a/workflows/pangenome-generate/relabel-seqs.cwl +++ b/workflows/pangenome-generate/relabel-seqs.cwl @@ -26,19 +26,21 @@ requirements: var out = []; for (; i < inputs.readsFA.length; i++) { var block = []; - for (; i < (b*100) && i < inputs.readsFA.length; i++) { + var sub = []; + for (; i < (b*150) && i < inputs.readsFA.length; i++) { block.push(inputs.readsFA[i]); + sub.push(inputs.subjects[i]); } out.push({ entryname: "block"+b, entry: JSON.stringify(block) }); + out.push({ + entryname: "subs"+b, + entry: JSON.stringify(sub) + }); b++; } - out.push({ - entry: JSON.stringify(inputs.subjects), - entryname: "subjects" - }); return out; } hints: diff --git a/workflows/pangenome-generate/relabel-seqs.py b/workflows/pangenome-generate/relabel-seqs.py index 970540f..6b022a0 100644 --- a/workflows/pangenome-generate/relabel-seqs.py +++ b/workflows/pangenome-generate/relabel-seqs.py @@ -1,15 +1,17 @@ import os import json -reads = [] -b = 1 -while os.path.exists("block%i" % b): - with open("block%i" % b) as f: - reads.extend(json.load(f)) - b += 1 +def readitems(stem): + items = [] + b = 1 + while os.path.exists("%s%i" % (stem, b)): + with open("%s%i" % (stem, b)) as f: + items.extend(json.load(f)) + b += 1 + return items -with open("subjects") as f: - subjects = json.load(f) +reads = readitems("block") +subjects = readitems("subs") relabeled_fasta = open("relabeledSeqs.fasta", "wt") original_labels = open("originalLabels.ttl", "wt") -- cgit v1.2.3
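
Illustration of the chunking convention this patch introduces (not part of the patch itself): the InitialWorkDirRequirement expressions in merge-metadata.cwl and relabel-seqs.cwl stage the metadata/readsFA file list and the subject list as paired JSON files block1/subs1, block2/subs2, ... of at most 150 entries each, and the rewritten Python scripts reassemble them with readitems(). Below is a minimal, self-contained Python sketch of that round trip; write_chunks is a hypothetical stand-in for the JavaScript listing expression (which stages the numbered files inside the working directory), and the example data is made up.

import json
import os

CHUNK = 150  # matches the (b*150) bound used by the CWL expressions in this patch

def write_chunks(stem, items, chunk=CHUNK):
    # Hypothetical helper, for illustration only: in the workflow the numbered
    # files are produced by the InitialWorkDirRequirement expression
    # ("block"+b / "subs"+b entries), not by Python.
    for b, start in enumerate(range(0, len(items), chunk), start=1):
        with open("%s%i" % (stem, b), "w") as f:
            json.dump(items[start:start + chunk], f)

def readitems(stem):
    # Same reassembly logic the patch adds to merge-metadata.py and
    # relabel-seqs.py: keep reading stem1, stem2, ... until a file is missing.
    items = []
    b = 1
    while os.path.exists("%s%i" % (stem, b)):
        with open("%s%i" % (stem, b)) as f:
            items.extend(json.load(f))
        b += 1
    return items

if __name__ == "__main__":
    # Made-up stand-in data; real runs stage CWL File objects and subject URIs.
    metadata = ["meta%d" % n for n in range(400)]
    subjects = ["http://example.org/seq/%d" % n for n in range(400)]
    write_chunks("block", metadata)
    write_chunks("subs", subjects)
    assert readitems("block") == metadata
    assert readitems("subs") == subjects

Because readitems() stops at the first missing numbered file, the Python scripts never need to be told how many chunks were staged, and each staged JSON entry stays small enough to sidestep the CWL limit the commit message refers to. Note that write_chunks splits the list cleanly into blocks of 150; it mirrors the block/subs naming convention rather than being a line-for-line port of the JavaScript loop.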