author     Peter Amstutz  2020-04-20 14:57:25 -0400
committer  Peter Amstutz  2020-04-20 14:57:25 -0400
commit     9ddcfeacb3191638f42b08af999889d867f0f81c (patch)
tree       4cfe4c2b1df38bf6e5c79f5f8c0700407f76a472 /workflows/pangenome-generate/merge-metadata.py
parent     d29dfd593233541b85c1cefb239650279d57d59f (diff)
Better handling of duplicate sequences
Also save original fasta label in metadata
Diffstat (limited to 'workflows/pangenome-generate/merge-metadata.py')
-rw-r--r--  workflows/pangenome-generate/merge-metadata.py | 21
1 file changed, 20 insertions(+), 1 deletion(-)
diff --git a/workflows/pangenome-generate/merge-metadata.py b/workflows/pangenome-generate/merge-metadata.py
index 64275b1..bfec781 100644
--- a/workflows/pangenome-generate/merge-metadata.py
+++ b/workflows/pangenome-generate/merge-metadata.py
@@ -1,9 +1,13 @@
+import re
 import schema_salad.schema
 import schema_salad.jsonld_context
+import json
 
 metadataSchema = '$(inputs.metadataSchema.path)'
 metadata = $(inputs.metadata)
 subjects = $(inputs.subjects)
+dups = json.loads('''$(inputs.dups)''')
+originalLabels = $(inputs.originalLabels)
 
 (document_loader,
  avsc_names,
@@ -11,7 +15,22 @@ subjects = $(inputs.subjects)
  metaschema_loader) = schema_salad.schema.load_schema(metadataSchema)
 
 for i, m in enumerate(metadata):
-    doc, metadata = schema_salad.schema.load_and_validate(document_loader, avsc_names, m["path"], True)
+    doc, metadata = schema_salad.schema.load_and_validate(document_loader, avsc_names, m["path"], False, False)
     doc["id"] = subjects[i]
     g = schema_salad.jsonld_context.makerdf(subjects[i], doc, document_loader.ctx)
     print(g.serialize(format="ntriples").decode("utf-8"))
+
+import logging
+
+if dups:
+    sameseqs = open(dups["path"], "rt")
+    for d in sameseqs:
+        logging.warn(d)
+        g = re.match(r"\\d+\\t(.*)", d)
+        logging.warn("%s", g.group(1))
+        sp = g.group(1).split(",")
+        for n in sp[1:]:
+            print("<%s> <http://biohackathon.org/bh20-seq-schema/has_duplicate_sequence> <%s> ." % (n.strip(), sp[0].strip()))
+
+orig = open(originalLabels["path"], "rt")
+print(orig.read())
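
For reference, a minimal standalone sketch of the new duplicate handling, assuming the dups file uses the "<count>\t<label1>, <label2>, ..." layout implied by the regex and the comma split above (the doubled backslashes in the committed template appear to be escaping for the surrounding CWL expression, so the effective regex is \d+\t(.*)); the sequence labels here are hypothetical:

import re

# Hypothetical dups line: a count, a tab, then comma-separated sequence labels.
sameseqs = ["2\tMT291826.1, MT291827.1\n"]
for d in sameseqs:
    g = re.match(r"\d+\t(.*)", d)        # capture everything after the leading count
    sp = g.group(1).split(",")           # first label is the kept sequence; the rest duplicate it
    for n in sp[1:]:
        print("<%s> <http://biohackathon.org/bh20-seq-schema/has_duplicate_sequence> <%s> ."
              % (n.strip(), sp[0].strip()))

Run as-is, this prints one triple linking each duplicate back to the kept label:

<MT291827.1> <http://biohackathon.org/bh20-seq-schema/has_duplicate_sequence> <MT291826.1> .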