From 9ddcfeacb3191638f42b08af999889d867f0f81c Mon Sep 17 00:00:00 2001
From: Peter Amstutz
Date: Mon, 20 Apr 2020 14:57:25 -0400
Subject: Better handling of duplicate sequences

Also save original fasta label in metadata
---
 workflows/pangenome-generate/merge-metadata.py | 21 ++++++++++++++++++++-
 1 file changed, 20 insertions(+), 1 deletion(-)

diff --git a/workflows/pangenome-generate/merge-metadata.py b/workflows/pangenome-generate/merge-metadata.py
index 64275b1..bfec781 100644
--- a/workflows/pangenome-generate/merge-metadata.py
+++ b/workflows/pangenome-generate/merge-metadata.py
@@ -1,9 +1,13 @@
+import re
 import schema_salad.schema
 import schema_salad.jsonld_context
+import json
 
 metadataSchema = '$(inputs.metadataSchema.path)'
 metadata = $(inputs.metadata)
 subjects = $(inputs.subjects)
+dups = json.loads('''$(inputs.dups)''')
+originalLabels = $(inputs.originalLabels)
 
 (document_loader,
  avsc_names,
@@ -11,7 +15,22 @@ subjects = $(inputs.subjects)
  metaschema_loader) = schema_salad.schema.load_schema(metadataSchema)
 
 for i, m in enumerate(metadata):
-    doc, metadata = schema_salad.schema.load_and_validate(document_loader, avsc_names, m["path"], True)
+    doc, metadata = schema_salad.schema.load_and_validate(document_loader, avsc_names, m["path"], False, False)
     doc["id"] = subjects[i]
     g = schema_salad.jsonld_context.makerdf(subjects[i], doc, document_loader.ctx)
     print(g.serialize(format="ntriples").decode("utf-8"))
+
+import logging
+
+if dups:
+    sameseqs = open(dups["path"], "rt")
+    for d in sameseqs:
+        logging.warn(d)
+        g = re.match(r"\\d+\\t(.*)", d)
+        logging.warn("%s", g.group(1))
+        sp = g.group(1).split(",")
+        for n in sp[1:]:
+            print("<%s> <%s> ." % (n.strip(), sp[0].strip()))
+
+orig = open(originalLabels["path"], "rt")
+print(orig.read())
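
The block appended at the bottom of merge-metadata.py reads the dups file line by line, strips off the leading count, and prints one statement per duplicate label pointing back to the first (kept) label. Below is a minimal standalone sketch of that parsing logic, not the script itself: it assumes a dups line has the form "<count>\t<label1>, <label2>, ..." (consistent with the regex in the patch), the sample labels and the helper name emit_duplicate_statements are illustrative only, and the doubled backslashes in the patch are presumably escaping for the surrounding CWL expression, so a plain Python script uses single backslashes.

import re

# Hypothetical sample line; the real dups file is produced upstream in the
# pangenome-generate workflow and its exact contents are not shown in the patch.
SAMPLE_DUPS_LINE = "3\tMT039887.1, MT039888.1, MT039890.1\n"

def emit_duplicate_statements(lines):
    """Yield one "<duplicate> <kept> ." line per duplicate label, mirroring the patch."""
    for line in lines:
        m = re.match(r"\d+\t(.*)", line)   # leading count, then comma-separated labels
        if m is None:
            continue                       # skip lines that do not match the expected shape
        labels = [s.strip() for s in m.group(1).split(",")]
        kept, duplicates = labels[0], labels[1:]
        for dup in duplicates:
            yield "<%s> <%s> ." % (dup, kept)

if __name__ == "__main__":
    for statement in emit_duplicate_statements([SAMPLE_DUPS_LINE]):
        print(statement)

Run on the sample line, the sketch prints "<MT039888.1> <MT039887.1> ." and "<MT039890.1> <MT039887.1> .", the same shape of output the patched script appends after the per-record RDF serialization, followed by the contents of the original-labels file.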