-rw-r--r--  bh20seqanalyzer/main.py | 134
-rw-r--r--  bh20sequploader/main.py | 29
-rw-r--r--  bh20sequploader/qc_fasta.py | 73
-rw-r--r--  bh20sequploader/qc_metadata.py | 21
-rw-r--r--  scripts/create_sra_metadata/SraExperimentPackage.2020.07.05.xml.gz | bin 0 -> 6502056 bytes
-rw-r--r--  scripts/create_sra_metadata/create_sra_metadata.py (renamed from scripts/download_sra_data/download_sra_data.py) | 52
-rw-r--r--  scripts/dict_ontology_standardization/ncbi_countries.csv | 37
-rw-r--r--  scripts/dict_ontology_standardization/ncbi_host_species.csv | 1
-rw-r--r--  scripts/dict_ontology_standardization/ncbi_sequencing_technology.csv | 23
-rw-r--r--  scripts/docker/Dockerfile | 4
-rwxr-xr-x  scripts/download_genbank_data/from_genbank_to_fasta_and_yaml.py | 125
-rw-r--r--  scripts/download_sra_data/SraExperimentPackage.2020.06.08.xml.gz | bin 3227724 -> 0 bytes
-rw-r--r--  scripts/fetch_from_genbank.cwl | 49
-rw-r--r--  scripts/import.cwl | 2
-rw-r--r--  scripts/import_from_genbank.cwl | 27
-rw-r--r--  scripts/import_to_arvados.py | 2
-rw-r--r--  scripts/split_into_arrays.cwl | 30
-rw-r--r--  scripts/upload.cwl | 13
-rw-r--r--  semantic_enrichment/countries.ttl | 79
-rw-r--r--  semantic_enrichment/labels.ttl | 2
20 files changed, 491 insertions, 212 deletions
diff --git a/bh20seqanalyzer/main.py b/bh20seqanalyzer/main.py
index 9a36cae..1746587 100644
--- a/bh20seqanalyzer/main.py
+++ b/bh20seqanalyzer/main.py
@@ -20,26 +20,36 @@ def validate_upload(api, collection, validated_project,
fastq_project, fastq_workflow_uuid):
col = arvados.collection.Collection(collection["uuid"])
+ if collection["properties"].get("status") in ("validated", "rejected"):
+ return False
+
# validate the collection here. Check metadata, etc.
- valid = True
+ logging.info("Validating upload '%s' (%s)" % (collection["name"], collection["uuid"]))
- if "metadata.yaml" not in col:
- logging.warn("Upload '%s' missing metadata.yaml", collection["name"])
- valid = False
- else:
- try:
- metadata_content = ruamel.yaml.round_trip_load(col.open("metadata.yaml"))
- metadata_content["id"] = "http://arvados.org/keep:%s/metadata.yaml" % collection["portable_data_hash"]
- sample_id = metadata_content["sample"]["sample_id"]
- add_lc_filename(metadata_content, metadata_content["id"])
- valid = qc_metadata(metadata_content) and valid
- except Exception as e:
- logging.warn(e)
- valid = False
- if not valid:
- logging.warn("Failed metadata qc")
-
- if valid:
+ errors = []
+
+ dup = api.collections().list(filters=[["owner_uuid", "=", validated_project],
+ ["portable_data_hash", "=", col.portable_data_hash()]]).execute()
+ if dup["items"]:
+ # This exact collection has been uploaded before.
+ errors.append("Duplicate of %s" % ([d["uuid"] for d in dup["items"]]))
+
+ if not errors:
+ if "metadata.yaml" not in col:
+ errors.append("Missing metadata.yaml", collection["name"])
+ else:
+ try:
+ metadata_content = ruamel.yaml.round_trip_load(col.open("metadata.yaml"))
+ metadata_content["id"] = "http://arvados.org/keep:%s/metadata.yaml" % collection["portable_data_hash"]
+ sample_id = metadata_content["sample"]["sample_id"]
+ add_lc_filename(metadata_content, metadata_content["id"])
+ valid = qc_metadata(metadata_content)
+ if not valid:
+ errors.append("Failed metadata qc")
+ except Exception as e:
+ errors.append(str(e))
+
+ if not errors:
try:
tgt = None
paired = {"reads_1.fastq": "reads.fastq", "reads_1.fastq.gz": "reads.fastq.gz"}
@@ -49,36 +59,32 @@ def validate_upload(api, collection, validated_project,
with col.open(n, 'rb') as qf:
tgt = qc_fasta(qf)[0]
if tgt != n and tgt != paired.get(n):
- logging.info("Expected %s but magic says it should be %s", n, tgt)
- valid = False
+ errors.append("Expected %s but magic says it should be %s", n, tgt)
elif tgt in ("reads.fastq", "reads.fastq.gz", "reads_1.fastq", "reads_1.fastq.gz"):
start_fastq_to_fasta(api, collection, fastq_project, fastq_workflow_uuid, n, sample_id)
return False
if tgt is None:
- valid = False
- logging.warn("Upload '%s' does not contain sequence.fasta, reads.fastq or reads_1.fastq", collection["name"])
- except ValueError as v:
- valid = False
+ errors.append("Upload '%s' does not contain sequence.fasta, reads.fastq or reads_1.fastq", collection["name"])
+ except Exception as v:
+ errors.append(str(v))
- dup = api.collections().list(filters=[["owner_uuid", "=", validated_project],
- ["portable_data_hash", "=", col.portable_data_hash()]]).execute()
- if dup["items"]:
- # This exact collection has been uploaded before.
- valid = False
- logging.warn("Upload '%s' is duplicate" % collection["name"])
- if valid:
+ if not errors:
logging.info("Added '%s' to validated sequences" % collection["name"])
# Move it to the "validated" project to be included in the next analysis
+ collection["properties"]["status"] = "validated"
api.collections().update(uuid=collection["uuid"], body={
"owner_uuid": validated_project,
"name": "%s (%s)" % (collection["name"], time.asctime(time.gmtime()))}).execute()
+ return True
else:
- # It is invalid, delete it.
- logging.warn("Suggest deleting '%s'" % collection["name"])
- #api.collections().delete(uuid=collection["uuid"]).execute()
-
- return valid
+ # It is invalid
+ logging.warn("'%s' (%s) has validation errors: %s" % (
+ collection["name"], collection["uuid"], "\n".join(errors)))
+ collection["properties"]["status"] = "rejected"
+ collection["properties"]["errors"] = errors
+ api.collections().update(uuid=collection["uuid"], body={"properties": collection["properties"]}).execute()
+ return False
def run_workflow(api, parent_project, workflow_uuid, name, inputobj):
@@ -229,6 +235,50 @@ def upload_schema(api, workflow_def_project):
return "keep:%s/schema.yml" % pdh
+def print_status(api, uploader_project, fmt):
+ pending = arvados.util.list_all(api.collections().list, filters=[["owner_uuid", "=", uploader_project]])
+ out = []
+ status = {}
+ for p in pending:
+ prop = p["properties"]
+ out.append(prop)
+ if "status" not in prop:
+ prop["status"] = "pending"
+ prop["created_at"] = p["created_at"]
+ prop["uuid"] = p["uuid"]
+ status[prop["status"]] = status.get(prop["status"], 0) + 1
+ if fmt == "html":
+ print(
+"""
+<html>
+<body>
+""")
+ print("<p>Total collections in upload project %s</p>" % len(out))
+ print("<p>Status %s</p>" % status)
+ print(
+"""
+<table>
+<tr><th>Collection</th>
+<th>Sequence label</th>
+<th>Status</th>
+<th>Errors</th></tr>
+""")
+ for r in out:
+ print("<tr valign='top'>")
+ print("<td><a href='https://workbench.lugli.arvadosapi.com/collections/%s'>%s</a></td>" % (r["uuid"], r["uuid"]))
+ print("<td>%s</td>" % r["sequence_label"])
+ print("<td>%s</td>" % r["status"])
+ print("<td><pre>%s</pre></td>" % "\n".join(r.get("errors", [])))
+ print("</tr>")
+ print(
+"""
+</table>
+</body>
+</html>
+""")
+ else:
+ print(json.dumps(out, indent=2))
+
def main():
parser = argparse.ArgumentParser(description='Analyze collections uploaded to a project')
parser.add_argument('--uploader-project', type=str, default='lugli-j7d0g-n5clictpuvwk8aa', help='')
@@ -244,7 +294,9 @@ def main():
parser.add_argument('--latest-result-collection', type=str, default='lugli-4zz18-z513nlpqm03hpca', help='')
parser.add_argument('--kickoff', action="store_true")
+ parser.add_argument('--no-start-analysis', action="store_true")
parser.add_argument('--once', action="store_true")
+ parser.add_argument('--print-status', type=str, default=None)
args = parser.parse_args()
api = arvados.api()
@@ -263,20 +315,24 @@ def main():
args.exclude_list)
return
+ if args.print_status:
+ print_status(api, args.uploader_project, args.print_status)
+ exit(0)
+
logging.info("Starting up, monitoring %s for uploads" % (args.uploader_project))
while True:
move_fastq_to_fasta_results(api, args.fastq_project, args.uploader_project)
- new_collections = api.collections().list(filters=[['owner_uuid', '=', args.uploader_project]]).execute()
+ new_collections = arvados.util.list_all(api.collections().list, filters=[["owner_uuid", "=", args.uploader_project]])
at_least_one_new_valid_seq = False
- for c in new_collections["items"]:
+ for c in new_collections:
at_least_one_new_valid_seq = validate_upload(api, c,
args.validated_project,
args.fastq_project,
args.fastq_workflow_uuid) or at_least_one_new_valid_seq
- if at_least_one_new_valid_seq:
+ if at_least_one_new_valid_seq and not args.no_start_analysis:
start_pangenome_analysis(api,
args.pangenome_analysis_project,
args.pangenome_workflow_uuid,
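
Note: the hunks above persist validation state in collection properties ("pending" by default, then "validated" or "rejected"), which print_status() aggregates. A minimal sketch of the same aggregation, assuming ARVADOS_API_HOST/ARVADOS_API_TOKEN are configured in the environment (the project UUID is the uploader project used throughout this repo):

    import arvados
    import arvados.util

    UPLOADER_PROJECT = "lugli-j7d0g-n5clictpuvwk8aa"

    api = arvados.api()
    counts = {}
    for c in arvados.util.list_all(api.collections().list,
                                   filters=[["owner_uuid", "=", UPLOADER_PROJECT]]):
        # validate_upload() records the outcome in the properties dict
        status = c["properties"].get("status", "pending")
        counts[status] = counts.get(status, 0) + 1
    print(counts)  # e.g. {'pending': 1, 'validated': 12, 'rejected': 3}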
diff --git a/bh20sequploader/main.py b/bh20sequploader/main.py
index c442af0..fd0278d 100644
--- a/bh20sequploader/main.py
+++ b/bh20sequploader/main.py
@@ -22,30 +22,32 @@ ARVADOS_API_HOST='lugli.arvadosapi.com'
ARVADOS_API_TOKEN='2fbebpmbo3rw3x05ueu2i6nx70zhrsb1p22ycu3ry34m4x4462'
UPLOAD_PROJECT='lugli-j7d0g-n5clictpuvwk8aa'
-def qa_stuff(metadata, sequence_p1, sequence_p2):
+def qc_stuff(metadata, sequence_p1, sequence_p2, do_qc=True):
+ failed = False
try:
- log.debug("Checking metadata")
- if not qc_metadata(metadata.name):
+ log.debug("Checking metadata" if do_qc else "Skipping metadata check")
+ if do_qc and not qc_metadata(metadata.name):
log.warning("Failed metadata qc")
- exit(1)
- except ValueError as e:
+ failed = True
+ except Exception as e:
log.debug(e)
- log.debug("Failed metadata qc")
print(e)
- exit(1)
+ failed = True
target = []
try:
- log.debug("Checking FASTA/FASTQ QC")
- target.append(qc_fasta(sequence_p1))
+ log.debug("FASTA/FASTQ QC" if do_qc else "Limited FASTA/FASTQ QC")
+ target.append(qc_fasta(sequence_p1, check_with_clustalw=do_qc))
if sequence_p2:
target.append(qc_fasta(sequence_p2))
target[0] = ("reads_1."+target[0][0][6:], target[0][1])
target[1] = ("reads_2."+target[1][0][6:], target[0][1])
- except ValueError as e:
+ except Exception as e:
log.debug(e)
- log.debug("Failed FASTA qc")
print(e)
+ failed = True
+
+ if failed:
exit(1)
return target
@@ -62,13 +64,14 @@ def main():
parser = argparse.ArgumentParser(description='Upload SARS-CoV-19 sequences for analysis')
parser.add_argument('metadata', type=argparse.FileType('r'), help='sequence metadata json')
parser.add_argument('sequence_p1', type=argparse.FileType('rb'), help='sequence FASTA/FASTQ')
- parser.add_argument('sequence_p2', type=argparse.FileType('rb'), default=None, help='sequence FASTQ pair')
+ parser.add_argument('sequence_p2', type=argparse.FileType('rb'), default=None, nargs='?', help='sequence FASTQ pair')
parser.add_argument("--validate", action="store_true", help="Dry run, validate only")
+ parser.add_argument("--skip-qc", action="store_true", help="Skip local qc check")
args = parser.parse_args()
api = arvados.api(host=ARVADOS_API_HOST, token=ARVADOS_API_TOKEN, insecure=True)
- target = qa_stuff(args.metadata, args.sequence_p1, args.sequence_p2)
+ target = qc_stuff(args.metadata, args.sequence_p1, args.sequence_p2, not args.skip_qc)
seqlabel = target[0][1]
if args.validate:
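
Note: the sequence_p2 change above turns the FASTQ pair into an optional positional argument. A minimal sketch of the nargs='?' behaviour, using plain string paths instead of FileType so it runs without the files existing:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('sequence_p1', help='sequence FASTA/FASTQ')
    parser.add_argument('sequence_p2', nargs='?', default=None,
                        help='sequence FASTQ pair')
    parser.add_argument('--skip-qc', action='store_true')

    args = parser.parse_args(['sequence.fasta'])  # pair omitted
    assert args.sequence_p2 is None and not args.skip_qc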
diff --git a/bh20sequploader/qc_fasta.py b/bh20sequploader/qc_fasta.py
index e198430..37eb4e8 100644
--- a/bh20sequploader/qc_fasta.py
+++ b/bh20sequploader/qc_fasta.py
@@ -25,7 +25,7 @@ def read_fasta(sequence):
raise ValueError("FASTA file contains multiple entries")
return label, bases
-def qc_fasta(arg_sequence):
+def qc_fasta(arg_sequence, check_with_clustalw=True):
log.debug("Starting qc_fasta")
schema_resource = pkg_resources.resource_stream(__name__, "validation/formats")
with tempfile.NamedTemporaryFile() as tmp:
@@ -51,47 +51,48 @@ def qc_fasta(arg_sequence):
if seq_type == "text/fasta":
# ensure that contains only one entry
submitlabel, submitseq = read_fasta(sequence)
+ sequence.seek(0)
+ sequence.detach()
- with tempfile.NamedTemporaryFile() as tmp1:
- refstring = pkg_resources.resource_string(__name__, "SARS-CoV-2-reference.fasta")
- tmp1.write(refstring)
- tmp1.write(submitlabel.encode("utf8"))
- tmp1.write(("".join(submitseq)).encode("utf8"))
- tmp1.flush()
- subbp = 0
- refbp = 0
- similarity = 0
- try:
- cmd = ["clustalw", "-infile="+tmp1.name,
- "-quicktree", "-iteration=none", "-type=DNA"]
- print("QC checking similarity to reference")
- print(" ".join(cmd))
- result = subprocess.run(cmd, stdout=subprocess.PIPE)
- res = result.stdout.decode("utf-8")
- g1 = re.search(r"^Sequence 1: [^ ]+ +(\d+) bp$", res, flags=re.MULTILINE)
- refbp = float(g1.group(1))
- g2 = re.search(r"^Sequence 2: [^ ]+ +(\d+) bp$", res, flags=re.MULTILINE)
- subbp = float(g2.group(1))
- g3 = re.search(r"^Sequences \(1:2\) Aligned\. Score: (\d+(\.\d+)?)$", res, flags=re.MULTILINE)
- similarity = float(g3.group(1))
+ if not check_with_clustalw:
+ return ("sequence.fasta"+gz, seqlabel)
- print(g1.group(0))
- print(g2.group(0))
- print(g3.group(0))
- except Exception as e:
- logging.warn("Error trying to QC against reference sequence using 'clustalw': %s", e)
+ with tempfile.NamedTemporaryFile() as tmp1:
+ with tempfile.NamedTemporaryFile() as tmp2:
+ refstring = pkg_resources.resource_string(__name__, "SARS-CoV-2-reference.fasta")
+ tmp1.write(refstring)
+ tmp1.flush()
+ tmp2.write(submitlabel.encode("utf8"))
+ tmp2.write(("".join(submitseq)).encode("utf8"))
+ tmp2.flush()
+ subbp = 0
+ refbp = 0
+ similarity = 0
+ try:
+ cmd = ["minimap2", "-c", tmp1.name, tmp2.name]
+ logging.info("QC checking similarity to reference")
+ logging.info(" ".join(cmd))
+ result = subprocess.run(cmd, stdout=subprocess.PIPE)
+ result.check_returncode()
+ res = result.stdout.decode("utf-8")
+ mm = res.split("\t")
+ if len(mm) >= 10:
+ # divide Number of matching bases in the mapping / Target sequence length
+ similarity = (float(mm[9]) / float(mm[6])) * 100.0
+ else:
+ similarity = 0
+ except Exception as e:
+ logging.warn("QC against reference sequence using 'minimap2': %s", e, exc_info=e)
- if refbp and (subbp/refbp) < .7:
- raise ValueError("QC fail: submit sequence length is shorter than 70% reference")
- if refbp and (subbp/refbp) > 1.3:
- raise ValueError("QC fail: submit sequence length is greater than 130% reference")
- if similarity and similarity < 70.0:
- raise ValueError("QC fail: submit similarity is less than 70%")
- if refbp == 0 or similarity == 0:
- raise ValueError("QC fail")
+ if similarity and similarity < 70.0:
+ raise ValueError("QC fail: alignment to reference was less than 70%% (was %2.2f%%)" % (similarity))
+ if similarity == 0:
+ raise ValueError("QC fail")
return ("sequence.fasta"+gz, seqlabel)
elif seq_type == "text/fastq":
+ sequence.seek(0)
+ sequence.detach()
return ("reads.fastq"+gz, seqlabel)
else:
raise ValueError("Sequence file does not look like a DNA FASTA or FASTQ")
diff --git a/bh20sequploader/qc_metadata.py b/bh20sequploader/qc_metadata.py
index 9122ace..2b57991 100644
--- a/bh20sequploader/qc_metadata.py
+++ b/bh20sequploader/qc_metadata.py
@@ -21,20 +21,13 @@ def qc_metadata(metadatafile):
shex = pkg_resources.resource_stream(__name__, "bh20seq-shex.rdf").read().decode("utf-8")
if not isinstance(avsc_names, schema_salad.avro.schema.Names):
- print(avsc_names)
- return False
+ raise Exception(avsc_names)
- try:
- doc, metadata = schema_salad.schema.load_and_validate(document_loader, avsc_names, metadatafile, True)
- g = schema_salad.jsonld_context.makerdf("workflow", doc, document_loader.ctx)
- rslt, reason = evaluate(g, shex, doc["id"], "https://raw.githubusercontent.com/arvados/bh20-seq-resource/master/bh20sequploader/bh20seq-shex.rdf#submissionShape")
+ doc, metadata = schema_salad.schema.load_and_validate(document_loader, avsc_names, metadatafile, True)
+ g = schema_salad.jsonld_context.makerdf("workflow", doc, document_loader.ctx)
+ rslt, reason = evaluate(g, shex, doc["id"], "https://raw.githubusercontent.com/arvados/bh20-seq-resource/master/bh20sequploader/bh20seq-shex.rdf#submissionShape")
- if not rslt:
- log.debug(reason)
- print(reason)
+ if not rslt:
+ raise Exception(reason)
- return rslt
- except Exception as e:
- traceback.print_exc()
- log.warn(e)
- return False
+ return True
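
Note: since qc_metadata() now raises on failure instead of returning False, a caller can capture the schema or ShEx failure reason directly, as validate_upload() does. A sketch, assuming the bh20sequploader package is importable and metadata.yaml is a local file:

    from bh20sequploader.qc_metadata import qc_metadata

    errors = []
    try:
        qc_metadata("metadata.yaml")
    except Exception as e:
        errors.append(str(e))  # e.g. the ShEx evaluation "reason" text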
diff --git a/scripts/create_sra_metadata/SraExperimentPackage.2020.07.05.xml.gz b/scripts/create_sra_metadata/SraExperimentPackage.2020.07.05.xml.gz
new file mode 100644
index 0000000..88acb18
--- /dev/null
+++ b/scripts/create_sra_metadata/SraExperimentPackage.2020.07.05.xml.gz
Binary files differ
diff --git a/scripts/download_sra_data/download_sra_data.py b/scripts/create_sra_metadata/create_sra_metadata.py
index 9145a43..ef0d119 100644
--- a/scripts/download_sra_data/download_sra_data.py
+++ b/scripts/create_sra_metadata/create_sra_metadata.py
@@ -8,9 +8,11 @@ import gzip
dir_yaml = 'yaml'
-date = '2020.06.08'
+date = '2020.07.05'
-# Query on SRA: 'txid2697049[Organism]' (https://www.ncbi.nlm.nih.gov/sra/?term=txid2697049%5BOrganism%5D) -> Send to -> File -> Full XML -> Create File
+# Query on SRA: 'txid2697049[Organism]' (https://www.ncbi.nlm.nih.gov/sra/?term=txid2697049%5BOrganism%5D)
+# Query on SRA: 'txid2697049[Organism:noexp] NOT 0[Mbases ' (https://www.ncbi.nlm.nih.gov/sra/?term=txid2697049%5BOrganism:noexp%5D%20NOT%200[Mbases)
+# -> Send to -> File -> Full XML -> Create File
path_sra_metadata_xml = 'SraExperimentPackage.{}.xml.gz'.format(date)
dir_dict_ontology_standardization = '../dict_ontology_standardization/'
@@ -70,14 +72,14 @@ for i, EXPERIMENT_PACKAGE in enumerate(EXPERIMENT_PACKAGE_SET):
accession = RUN.attrib['accession']
run_accession_set.add(accession)
#print(accession)
-
+
info_for_yaml_dict['sample']['sample_id'] = accession
- SRAFiles = RUN.find('SRAFiles')
- if SRAFiles is not None:
- url = SRAFiles.find('SRAFile').attrib['url']
- if 'sra-download.ncbi.nlm.nih.gov' in url:
- run_accession_to_downloadble_file_url_dict[accession] = url
+ #SRAFiles = RUN.find('SRAFiles')
+ #if SRAFiles is not None:
+ # url = SRAFiles.find('SRAFile').attrib['url']
+ # if 'sra-download.ncbi.nlm.nih.gov' in url:
+ # run_accession_to_downloadble_file_url_dict[accession] = url
SAMPLE = EXPERIMENT_PACKAGE.find('SAMPLE')
@@ -90,6 +92,9 @@ for i, EXPERIMENT_PACKAGE in enumerate(EXPERIMENT_PACKAGE_SET):
VALUE_text = VALUE.text
if TAG_text in ['host', 'host scientific name']:
+ if VALUE_text.lower() in ['homo sapien', 'homosapiens']:
+ VALUE_text = 'Homo sapiens'
+
if VALUE_text in term_to_uri_dict:
info_for_yaml_dict['host']['host_species'] = term_to_uri_dict[VALUE_text]
else:
@@ -100,11 +105,19 @@ for i, EXPERIMENT_PACKAGE in enumerate(EXPERIMENT_PACKAGE_SET):
elif VALUE_text.strip("'") not in ['missing', 'not collected', 'not provided']:
missing_value_list.append('\t'.join([accession, 'host_health_status', VALUE_text]))
elif TAG_text in ['strain', 'isolate']:
- if VALUE_text.lower() not in ['not applicable', 'missing', 'na', 'unknown']:
- if 'virus_strain' not in info_for_yaml_dict:
- info_for_yaml_dict['virus']['virus_strain'] = VALUE_text
+ if VALUE_text.lower() not in ['not applicable', 'missing', 'na', 'unknown', 'not provided']:
+ value_to_insert = VALUE_text
+
+ if value_to_insert.lower() in ['homo sapien', 'homosapiens']:
+ value_to_insert = 'Homo sapiens'
+
+ if value_to_insert in term_to_uri_dict:
+ value_to_insert = term_to_uri_dict[value_to_insert]
+
+ if 'virus_strain' not in info_for_yaml_dict:
+ info_for_yaml_dict['virus']['virus_strain'] = value_to_insert
else:
- info_for_yaml_dict['virus']['virus_strain'] += '; ' + VALUE_text
+ info_for_yaml_dict['virus']['virus_strain'] += '; ' + value_to_insert
elif TAG_text in ['isolation_source', 'isolation source host-associated']:
if VALUE_text in term_to_uri_dict:
info_for_yaml_dict['sample']['specimen_source'] = [term_to_uri_dict[VALUE_text]]
@@ -179,17 +192,18 @@ for i, EXPERIMENT_PACKAGE in enumerate(EXPERIMENT_PACKAGE_SET):
EXPERIMENT = EXPERIMENT_PACKAGE.find('EXPERIMENT')
INSTRUMENT_MODEL = [x.text for x in EXPERIMENT.find('PLATFORM').iter('INSTRUMENT_MODEL')][0]
+
if INSTRUMENT_MODEL.lower() != 'unspecified':
if INSTRUMENT_MODEL in term_to_uri_dict:
info_for_yaml_dict['technology']['sample_sequencing_technology'] = [term_to_uri_dict[INSTRUMENT_MODEL]]
else:
missing_value_list.append('\t'.join([accession, 'sample_sequencing_technology', INSTRUMENT_MODEL]))
-
+ #else:
+ # print(accession, 'Missing INSTRUMENT_MODEL', info_for_yaml_dict)
LIBRARY_DESCRIPTOR = EXPERIMENT.find('DESIGN').find('LIBRARY_DESCRIPTOR')
if LIBRARY_DESCRIPTOR.text not in ['OTHER']:
info_for_yaml_dict['technology']['additional_technology_information'] = 'LIBRARY_STRATEGY: {};'.format(LIBRARY_DESCRIPTOR.find('LIBRARY_STRATEGY').text)
-
-
+
SUBMISSION = EXPERIMENT_PACKAGE.find('SUBMISSION')
info_for_yaml_dict['submitter']['submitter_sample_id'] = SUBMISSION.attrib['accession']
@@ -197,7 +211,7 @@ for i, EXPERIMENT_PACKAGE in enumerate(EXPERIMENT_PACKAGE_SET):
info_for_yaml_dict['submitter']['originating_lab'] = SUBMISSION.attrib['lab_name']
STUDY = EXPERIMENT_PACKAGE.find('STUDY')
- info_for_yaml_dict['submitter']['publication'] = SUBMISSION.attrib['lab_name']
+ info_for_yaml_dict['submitter']['publication'] = STUDY.attrib['alias']
Organization = EXPERIMENT_PACKAGE.find('Organization')
@@ -222,6 +236,10 @@ for i, EXPERIMENT_PACKAGE in enumerate(EXPERIMENT_PACKAGE_SET):
info_for_yaml_dict['sample']['collection_date'] = '1970-01-01'
info_for_yaml_dict['sample']['additional_collection_information'] = "The real 'collection_date' is missing"
+ if 'sample_sequencing_technology' not in info_for_yaml_dict['technology']:
+ print(accession, ' - technology not found')
+ continue
+
with open(os.path.join(dir_yaml, '{}.yaml'.format(accession)), 'w') as fw:
json.dump(info_for_yaml_dict, fw, indent=2)
@@ -229,4 +247,4 @@ if len(missing_value_list) > 0:
path_missing_terms_tsv = 'missing_terms.tsv'
print('Written missing terms in {}'.format(path_missing_terms_tsv))
with open(path_missing_terms_tsv, 'w') as fw:
- fw.write('\n'.join(missing_value_list))
\ No newline at end of file
+ fw.write('\n'.join(missing_value_list))
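
Note: a sketch of the host normalization added above, where common misspellings are folded to 'Homo sapiens' before the ontology lookup (term_to_uri_dict is stubbed here; the real script loads it from the CSVs in ../dict_ontology_standardization/):

    term_to_uri_dict = {
        "Homo sapiens": "http://purl.obolibrary.org/obo/NCBITaxon_9606",
    }

    def normalize_host(value_text):
        if value_text.lower() in ("homo sapien", "homosapiens"):
            value_text = "Homo sapiens"
        return term_to_uri_dict.get(value_text, value_text)

    assert normalize_host("homo sapien") == "http://purl.obolibrary.org/obo/NCBITaxon_9606"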
diff --git a/scripts/dict_ontology_standardization/ncbi_countries.csv b/scripts/dict_ontology_standardization/ncbi_countries.csv
index 204f7f2..4bea3ec 100644
--- a/scripts/dict_ontology_standardization/ncbi_countries.csv
+++ b/scripts/dict_ontology_standardization/ncbi_countries.csv
@@ -284,6 +284,7 @@ USA:CA,http://www.wikidata.org/entity/Q99
USA: California,http://www.wikidata.org/entity/Q99
USA:California,http://www.wikidata.org/entity/Q99
"USA: CA, San Diego County",http://www.wikidata.org/entity/Q108143
+"USA: California, Monterey County",http://www.wikidata.org/entity/Q108072
USA: CO,http://www.wikidata.org/entity/Q1261
USA: CT,http://www.wikidata.org/entity/Q779
USA: Connecticut,http://www.wikidata.org/entity/Q779
@@ -301,8 +302,9 @@ USA: IN,http://www.wikidata.org/entity/Q1415
USA: KS,http://www.wikidata.org/entity/Q1558
USA: KY,http://www.wikidata.org/entity/Q1603
USA: LA,http://www.wikidata.org/entity/Q1588
-USA:Los Angeles,http://www.wikidata.org/entity/Q65
-"USA: New Orleans, LA",http://www.wikidata.org/entity/Q34404
+"USA: SLIDELL, LA, LA",https://www.wikidata.org/wiki/Q988156
+"USA: Slidell, LA, LA",https://www.wikidata.org/wiki/Q988156
+"USA: New Orleans, LA",https://www.wikidata.org/wiki/Q34404
USA: MA,http://www.wikidata.org/entity/Q771
USA: Massachusetts,http://www.wikidata.org/entity/Q771
USA: MD,http://www.wikidata.org/entity/Q1391
@@ -336,6 +338,33 @@ USA: SC,http://www.wikidata.org/entity/Q1456
USA: South Carolina,http://www.wikidata.org/entity/Q1456
USA: SD,http://www.wikidata.org/entity/Q1211
"USA: Snohomish County, WA",http://www.wikidata.org/entity/Q110403
+"USA: Washington,Snohomish County",http://www.wikidata.org/entity/Q110403
+"USA: Washington, Snohomish County",http://www.wikidata.org/entity/Q110403
+"USA: Washington,Skagit County",http://www.wikidata.org/entity/Q113892
+"USA: Washington, Skagit County",http://www.wikidata.org/entity/Q113892
+"USA: Washington,Pierce County",http://www.wikidata.org/entity/Q156459
+"USA: Washington, Pierce County",http://www.wikidata.org/entity/Q156459
+"USA: Washington,Mason County",http://www.wikidata.org/entity/Q111904
+"USA: Washington, Mason County",http://www.wikidata.org/entity/Q111904
+"USA: Washington,Kittitas County",http://www.wikidata.org/entity/Q111540
+"USA: Washington,King County",http://www.wikidata.org/entity/Q108861
+"USA: Washington, King County",http://www.wikidata.org/entity/Q108861
+"USA: King County,WA",http://www.wikidata.org/entity/Q108861
+"USA: Washington,Jefferson County",http://www.wikidata.org/entity/Q384737
+"USA: Washington,Grant County",http://www.wikidata.org/entity/Q281681
+"USA: Washington, Grant County",http://www.wikidata.org/entity/Q281681
+"USA: Washington,Franklin County",http://www.wikidata.org/entity/Q118716
+"USA: Washington, Franklin County",http://www.wikidata.org/entity/Q118716
+"USA: Washington,Clark County",http://www.wikidata.org/entity/Q156287
+"USA: Washington,Benton County",http://www.wikidata.org/entity/Q156216
+"USA: Washington, Benton County",http://www.wikidata.org/entity/Q156216
+"USA: Washington,Asotin County",http://www.wikidata.org/entity/Q156295
+"USA: Washington, Asotin County",http://www.wikidata.org/entity/Q156295
+"USA: Washington,Adams County",http://www.wikidata.org/entity/Q156273
+"USA: Washington, Adams County",http://www.wikidata.org/entity/Q156273
+"USA: Washington, Spokane County",http://www.wikidata.org/entity/Q485276
+"USA: Washington, Douglas County",http://www.wikidata.org/entity/Q156220
+"USA: Washington, Cowlitz County",http://www.wikidata.org/entity/Q156276
USA: TN,http://www.wikidata.org/entity/Q1509
USA: TX,http://www.wikidata.org/entity/Q1439
USA: UT,http://www.wikidata.org/entity/Q829
@@ -347,6 +376,10 @@ USA: WA,http://www.wikidata.org/entity/Q1223
USA: Washington,http://www.wikidata.org/entity/Q1223
USA: WI,http://www.wikidata.org/entity/Q1537
USA: Wisconsin,http://www.wikidata.org/entity/Q1537
+"USA: Washington,Yakima County",http://www.wikidata.org/entity/Q156629
+"USA: Washington, Yakima County",http://www.wikidata.org/entity/Q156629
+"USA: Washington,Whatcom County",http://www.wikidata.org/entity/Q156623
+"USA: Dane County, Wisconsin",http://www.wikidata.org/entity/Q502200
USA: WV,http://www.wikidata.org/entity/Q1371
USA: WY,http://www.wikidata.org/entity/Q1214
Uzbekistan,http://www.wikidata.org/entity/Q265
diff --git a/scripts/dict_ontology_standardization/ncbi_host_species.csv b/scripts/dict_ontology_standardization/ncbi_host_species.csv
index bc6ac04..40572a3 100644
--- a/scripts/dict_ontology_standardization/ncbi_host_species.csv
+++ b/scripts/dict_ontology_standardization/ncbi_host_species.csv
@@ -5,5 +5,6 @@ sapiens,http://purl.obolibrary.org/obo/NCBITaxon_9606
Mustela lutreola,http://purl.obolibrary.org/obo/NCBITaxon_9666
Manis javanica,http://purl.obolibrary.org/obo/NCBITaxon_9974
Felis catus,http://purl.obolibrary.org/obo/NCBITaxon_9685
+Felis catus; Domestic Shorthair,http://purl.obolibrary.org/obo/NCBITaxon_9685
Panthera tigris jacksoni,http://purl.obolibrary.org/obo/NCBITaxon_419130
Canis lupus familiaris,http://purl.obolibrary.org/obo/NCBITaxon_9615
diff --git a/scripts/dict_ontology_standardization/ncbi_sequencing_technology.csv b/scripts/dict_ontology_standardization/ncbi_sequencing_technology.csv
index 964cbf3..59c4800 100644
--- a/scripts/dict_ontology_standardization/ncbi_sequencing_technology.csv
+++ b/scripts/dict_ontology_standardization/ncbi_sequencing_technology.csv
@@ -13,12 +13,14 @@ Illumina NextSeq 550,http://www.ebi.ac.uk/efo/EFO_0008566
NextSeq550,http://www.ebi.ac.uk/efo/EFO_0008566
NextSeq 550,http://www.ebi.ac.uk/efo/EFO_0008566
Illumina MiniSeq,http://www.ebi.ac.uk/efo/EFO_0008636
+Illumina NovaSeq,http://www.ebi.ac.uk/efo/EFO_0008637
Illumina NovaSeq 6000,http://www.ebi.ac.uk/efo/EFO_0008637
Nanopore MinION,http://www.ebi.ac.uk/efo/EFO_0008632
Oxford Nanopore MinION,http://www.ebi.ac.uk/efo/EFO_0008632
ONT (Oxford Nanopore Technologies),http://purl.obolibrary.org/obo/NCIT_C146818
Oxford Nanopore Technology,http://purl.obolibrary.org/obo/NCIT_C146818
Oxford Nanopore technologies MinION,http://www.ebi.ac.uk/efo/EFO_0008632
+Oxford Nanopore Sequencing,http://purl.obolibrary.org/obo/NCIT_C146818
MinION Oxford Nanopore,http://www.ebi.ac.uk/efo/EFO_0008632
MinION,http://www.ebi.ac.uk/efo/EFO_0008632
Nanopore,http://purl.obolibrary.org/obo/NCIT_C146818
@@ -33,3 +35,24 @@ ThermoFisher S5Plus,http://purl.obolibrary.org/obo/NCIT_C125894
Sanger dideoxy sequencing,http://purl.obolibrary.org/obo/NCIT_C19641
MGISEQ 2000,http://virtual-bh/MGISEQ2000
MGISEQ2000,http://virtual-bh/MGISEQ2000
+Illumina HiSeq X,http://www.ebi.ac.uk/efo/EFO_0008567
+ONT GridION X5,http://www.ebi.ac.uk/efo/EFO_0008633
+ONT PremethION,http://www.ebi.ac.uk/efo/EFO_0008634
+PacBio RS II,http://www.ebi.ac.uk/efo/EFO_0008631
+PacBio Sequel System,http://www.ebi.ac.uk/efo/EFO_0008630
+Illumina Genome Analyzer,http://www.ebi.ac.uk/efo/EFO_0004200
+Illumina Genome Analyzer II,http://www.ebi.ac.uk/efo/EFO_0004201
+Illumina Genome Analyzer IIx,http://www.ebi.ac.uk/efo/EFO_0004202
+454 GS 20 sequencer,http://www.ebi.ac.uk/efo/EFO_0004206
+454 GS FLX Titanium sequencer,http://www.ebi.ac.uk/efo/EFO_0004433
+454 GS FLX sequencer,http://www.ebi.ac.uk/efo/EFO_0004432
+454 GS Junior sequencer,http://www.ebi.ac.uk/efo/EFO_0004434
+454 GS sequencer,http://www.ebi.ac.uk/efo/EFO_0004431
+AB SOLiD 4 System,http://www.ebi.ac.uk/efo/EFO_0004438
+AB SOLiD 4hq System,http://www.ebi.ac.uk/efo/EFO_0004441
+AB SOLiD 5500,http://www.ebi.ac.uk/efo/EFO_0004440
+AB SOLiD 5500xl,http://www.ebi.ac.uk/efo/EFO_0004436
+AB SOLiD PI System,http://www.ebi.ac.uk/efo/EFO_0004437
+AB SOLiD System,http://www.ebi.ac.uk/efo/EFO_0004435
+AB SOLiD System 2.0,http://www.ebi.ac.uk/efo/EFO_0004442
+AB SOLiD System 3.0,http://www.ebi.ac.uk/efo/EFO_0004439
diff --git a/scripts/docker/Dockerfile b/scripts/docker/Dockerfile
index 9fb33d5..02829d4 100644
--- a/scripts/docker/Dockerfile
+++ b/scripts/docker/Dockerfile
@@ -3,8 +3,8 @@ FROM debian:10
RUN apt-get update && \
apt-get -yq --no-install-recommends -o Acquire::Retries=6 install \
python3 python3-pip python3-setuptools python3-dev python-pycurl \
- clustalw python3-biopython libcurl4-openssl-dev build-essential \
+ minimap2 python3-biopython libcurl4-openssl-dev build-essential \
libssl-dev libmagic-dev python3-magic && \
apt-get clean
-RUN pip3 install bh20-seq-uploader
+RUN pip3 install bh20-seq-uploader py-dateutil
diff --git a/scripts/download_genbank_data/from_genbank_to_fasta_and_yaml.py b/scripts/download_genbank_data/from_genbank_to_fasta_and_yaml.py
index d76f56b..cb94787 100755
--- a/scripts/download_genbank_data/from_genbank_to_fasta_and_yaml.py
+++ b/scripts/download_genbank_data/from_genbank_to_fasta_and_yaml.py
@@ -4,6 +4,8 @@ import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--skip-request', action='store_true', help='skip metadata and sequence request', required=False)
parser.add_argument('--only-missing-id', action='store_true', help='download only missing id', required=False)
+parser.add_argument('--dict-ontology', type=str, help='where is the ontology',
+ default='../dict_ontology_standardization/',required=False)
args = parser.parse_args()
from Bio import Entrez
@@ -22,7 +24,7 @@ num_ids_for_request = 100
dir_metadata = 'metadata_from_nuccore'
dir_fasta_and_yaml = 'fasta_and_yaml'
-dir_dict_ontology_standardization = '../dict_ontology_standardization/'
+dir_dict_ontology_standardization = args.dict_ontology
today_date = date.today().strftime("%Y.%m.%d")
path_ncbi_virus_accession = 'sequences.{}.acc'.format(today_date)
@@ -126,7 +128,7 @@ for path_dict_xxx_csv in [os.path.join(dir_dict_ontology_standardization, name_x
if term in term_to_uri_dict:
print('Warning: in the dictionaries there are more entries for the same term ({}).'.format(term))
continue
-
+
term_to_uri_dict[term] = uri
if not os.path.exists(dir_fasta_and_yaml):
@@ -171,7 +173,9 @@ for path_metadata_xxx_xml in [os.path.join(dir_metadata, name_metadata_xxx_xml)
# submitter info
GBSeq_references = GBSeq.find('GBSeq_references')
if GBSeq_references is not None:
- info_for_yaml_dict['submitter']['authors'] = ["{}".format(x.text) for x in GBSeq_references.iter('GBAuthor')]
+ author_list = ["{}".format(x.text) for x in GBSeq_references.iter('GBAuthor')]
+ if len(author_list) > 0:
+ info_for_yaml_dict['submitter']['authors'] = author_list
GBReference = GBSeq_references.find('GBReference')
if GBReference is not None:
@@ -220,7 +224,8 @@ for path_metadata_xxx_xml in [os.path.join(dir_metadata, name_metadata_xxx_xml)
new_seq_tec_list.append(seq_tec)
- info_for_yaml_dict['technology']['sample_sequencing_technology'] = [x for x in new_seq_tec_list]
+ if len(new_seq_tec_list) > 0:
+ info_for_yaml_dict['technology']['sample_sequencing_technology'] = [x for x in new_seq_tec_list]
else:
info_for_yaml_dict['technology'][field_in_yaml] = tech_info_to_parse
@@ -238,58 +243,62 @@ for path_metadata_xxx_xml in [os.path.join(dir_metadata, name_metadata_xxx_xml)
GBQualifier_name_text = GBQualifier.find('GBQualifier_name').text
if GBQualifier_name_text == 'host':
- GBQualifier_value_text_list = GBQualifier_value_text.split('; ')
-
- if GBQualifier_value_text_list[0] in term_to_uri_dict:
- info_for_yaml_dict['host']['host_species'] = term_to_uri_dict[GBQualifier_value_text_list[0]]
- elif GBQualifier_value_text_list[0] and ('MT215193' in accession_version or 'MT270814' in accession_version):
- # Information checked manually from NCBI Virus
- info_for_yaml_dict['host']['host_species'] = term_to_uri_dict['Canis lupus familiaris']
+ if GBQualifier_value_text in term_to_uri_dict:
+ # Cases like 'Felis catus; Domestic Shorthair'
+ info_for_yaml_dict['host']['host_species'] = term_to_uri_dict[GBQualifier_value_text]
else:
- missing_value_list.append('\t'.join([accession_version, 'host_species', GBQualifier_value_text_list[0]]))
-
- # Possible cases:
- # - Homo sapiens --> ['Homo sapiens']
- # - Homo sapiens; female --> ['Homo sapiens', 'female']
- # - Homo sapiens; female 63 --> ['Homo sapiens', 'female 63']
- # - Homo sapiens; female; age 40 --> ['Homo sapiens', 'female', 'age 40']
- # - Homo sapiens; gender: F; age: 61 --> ['Homo sapiens', 'gender: F', 'age: 61']
- # - Homo sapiens; gender: M; age: 68 --> ['Homo sapiens', 'gender: M', 'age: 68']
- # - Homo sapiens; hospitalized patient --> ['Homo sapiens', 'hospitalized patient']
- # - Homo sapiens; male --> ['Homo sapiens', 'male']
- # - Homo sapiens; male; 63 --> ['Homo sapiens', 'male', '63']
- # - Homo sapiens; male; age 29 --> ['Homo sapiens', 'male', 'age 29']
- # - Homo sapiens; symptomatic --> ['Homo sapiens', 'symptomatic']
- if len(GBQualifier_value_text_list) > 1:
- host_sex = ''
- if 'female' in GBQualifier_value_text_list[1]:
- host_sex = 'female'
- elif 'male' in GBQualifier_value_text_list[1]:
- host_sex = 'male'
- elif 'gender' in GBQualifier_value_text_list[1]:
- host_sex_one_lecter = GBQualifier_value_text_list[1].split(':')[-1].strip()
- if host_sex_one_lecter in ['F', 'M']:
- host_sex = 'female' if host_sex_one_lecter == 'F' else 'male'
-
- if host_sex in ['male', 'female']:
- info_for_yaml_dict['host']['host_sex'] = "http://purl.obolibrary.org/obo/PATO_0000384" if host_sex == 'male' else "http://purl.obolibrary.org/obo/PATO_0000383"
- elif GBQualifier_value_text_list[1] in term_to_uri_dict:
- info_for_yaml_dict['host']['host_health_status'] = term_to_uri_dict[GBQualifier_value_text_list[1]]
+ GBQualifier_value_text_list = GBQualifier_value_text.split('; ')
+
+ if GBQualifier_value_text_list[0] in term_to_uri_dict:
+ info_for_yaml_dict['host']['host_species'] = term_to_uri_dict[GBQualifier_value_text_list[0]]
+ elif GBQualifier_value_text_list[0] and ('MT215193' in accession_version or 'MT270814' in accession_version):
+ # Information checked manually from NCBI Virus
+ info_for_yaml_dict['host']['host_species'] = term_to_uri_dict['Canis lupus familiaris']
else:
- missing_value_list.append('\t'.join([accession_version, 'host_sex or host_health_status', GBQualifier_value_text_list[1]]))
-
- # Host age
- host_age = -1
- if len(GBQualifier_value_text_list[1].split(' ')) > 1 and is_integer(GBQualifier_value_text_list[1].split(' ')[-1]):
- host_age = int(GBQualifier_value_text_list[1].split(' ')[-1])
- elif len(GBQualifier_value_text_list) > 2 and is_integer(GBQualifier_value_text_list[2].split(' ')[-1]):
- host_age = int(GBQualifier_value_text_list[2].split(' ')[-1])
-
- if host_age > -1:
- info_for_yaml_dict['host']['host_age'] = host_age
- info_for_yaml_dict['host']['host_age_unit'] = 'http://purl.obolibrary.org/obo/UO_0000036'
- elif len(GBQualifier_value_text_list) > 2:
- missing_value_list.append('\t'.join([accession_version, 'host_age', GBQualifier_value_text_list[2]]))
+ missing_value_list.append('\t'.join([accession_version, 'host_species', GBQualifier_value_text_list[0]]))
+
+ # Possible cases:
+ # - Homo sapiens --> ['Homo sapiens']
+ # - Homo sapiens; female --> ['Homo sapiens', 'female']
+ # - Homo sapiens; female 63 --> ['Homo sapiens', 'female 63']
+ # - Homo sapiens; female; age 40 --> ['Homo sapiens', 'female', 'age 40']
+ # - Homo sapiens; gender: F; age: 61 --> ['Homo sapiens', 'gender: F', 'age: 61']
+ # - Homo sapiens; gender: M; age: 68 --> ['Homo sapiens', 'gender: M', 'age: 68']
+ # - Homo sapiens; hospitalized patient --> ['Homo sapiens', 'hospitalized patient']
+ # - Homo sapiens; male --> ['Homo sapiens', 'male']
+ # - Homo sapiens; male; 63 --> ['Homo sapiens', 'male', '63']
+ # - Homo sapiens; male; age 29 --> ['Homo sapiens', 'male', 'age 29']
+ # - Homo sapiens; symptomatic --> ['Homo sapiens', 'symptomatic']
+ if len(GBQualifier_value_text_list) > 1:
+ host_sex = ''
+ if 'female' in GBQualifier_value_text_list[1]:
+ host_sex = 'female'
+ elif 'male' in GBQualifier_value_text_list[1]:
+ host_sex = 'male'
+ elif 'gender' in GBQualifier_value_text_list[1]:
+ host_sex_one_letter = GBQualifier_value_text_list[1].split(':')[-1].strip()
+ if host_sex_one_letter in ['F', 'M']:
+ host_sex = 'female' if host_sex_one_letter == 'F' else 'male'
+
+ if host_sex in ['male', 'female']:
+ info_for_yaml_dict['host']['host_sex'] = "http://purl.obolibrary.org/obo/PATO_0000384" if host_sex == 'male' else "http://purl.obolibrary.org/obo/PATO_0000383"
+ elif GBQualifier_value_text_list[1] in term_to_uri_dict:
+ info_for_yaml_dict['host']['host_health_status'] = term_to_uri_dict[GBQualifier_value_text_list[1]]
+ else:
+ missing_value_list.append('\t'.join([accession_version, 'host_sex or host_health_status', GBQualifier_value_text_list[1]]))
+
+ # Host age
+ host_age = -1
+ if len(GBQualifier_value_text_list[1].split(' ')) > 1 and is_integer(GBQualifier_value_text_list[1].split(' ')[-1]):
+ host_age = int(GBQualifier_value_text_list[1].split(' ')[-1])
+ elif len(GBQualifier_value_text_list) > 2 and is_integer(GBQualifier_value_text_list[2].split(' ')[-1]):
+ host_age = int(GBQualifier_value_text_list[2].split(' ')[-1])
+
+ if host_age > -1:
+ info_for_yaml_dict['host']['host_age'] = host_age
+ info_for_yaml_dict['host']['host_age_unit'] = 'http://purl.obolibrary.org/obo/UO_0000036'
+ elif len(GBQualifier_value_text_list) > 2:
+ missing_value_list.append('\t'.join([accession_version, 'host_age', GBQualifier_value_text_list[2]]))
elif GBQualifier_name_text == 'collected_by':
if any([x in GBQualifier_value_text.lower() for x in ['institute', 'hospital', 'city', 'center']]):
info_for_yaml_dict['sample']['collecting_institution'] = GBQualifier_value_text
@@ -362,9 +371,9 @@ for path_metadata_xxx_xml in [os.path.join(dir_metadata, name_metadata_xxx_xml)
info_for_yaml_dict['virus']['virus_species'] = "http://purl.obolibrary.org/obo/NCBITaxon_"+GBQualifier_value_text.split('taxon:')[1]
- # Remove technology key if empty!
- if (info_for_yaml_dict['technology']=={}):
- del info_for_yaml_dict['technology']
+ if 'sample_sequencing_technology' not in info_for_yaml_dict['technology']:
+ print(accession_version, ' - technology not found')
+ continue
with open(os.path.join(dir_fasta_and_yaml, '{}.fasta'.format(accession_version)), 'w') as fw:
fw.write('>{}\n{}'.format(accession_version, GBSeq_sequence.text.upper()))
@@ -391,5 +400,5 @@ if len(accession_with_errors_list) > 0:
print('Written the accession with errors in {}'.format(path_accession_with_errors_tsv))
with open(path_accession_with_errors_tsv, 'w') as fw:
fw.write('\n'.join(accession_with_errors_list))
-
+
print('Num. new sequences with length >= {} bp: {}'.format(min_len_to_count, num_seq_with_len_ge_X_bp))
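
Note: a condensed sketch of the host-qualifier parsing preserved in the hunk above, for a GenBank value like "Homo sapiens; male; age 29" (PATO_0000384/0000383 are the sex terms and UO_0000036 the year unit the script uses; ages here are assumed to be integers in years):

    def parse_host_qualifier(text):
        parts = text.split('; ')
        host = {'host_species': parts[0]}
        if len(parts) > 1:
            # check 'female' first, since 'male' is a substring of 'female'
            if 'female' in parts[1]:
                host['host_sex'] = 'http://purl.obolibrary.org/obo/PATO_0000383'
            elif 'male' in parts[1]:
                host['host_sex'] = 'http://purl.obolibrary.org/obo/PATO_0000384'
            tail = parts[-1].split(' ')[-1]
            if tail.isdigit():
                host['host_age'] = int(tail)
                host['host_age_unit'] = 'http://purl.obolibrary.org/obo/UO_0000036'
        return host

    print(parse_host_qualifier("Homo sapiens; male; age 29"))
    # {'host_species': 'Homo sapiens', 'host_sex': '...PATO_0000384',
    #  'host_age': 29, 'host_age_unit': '...UO_0000036'}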
diff --git a/scripts/download_sra_data/SraExperimentPackage.2020.06.08.xml.gz b/scripts/download_sra_data/SraExperimentPackage.2020.06.08.xml.gz
deleted file mode 100644
index f9cd995..0000000
--- a/scripts/download_sra_data/SraExperimentPackage.2020.06.08.xml.gz
+++ /dev/null
Binary files differ
diff --git a/scripts/fetch_from_genbank.cwl b/scripts/fetch_from_genbank.cwl
new file mode 100644
index 0000000..45c8eec
--- /dev/null
+++ b/scripts/fetch_from_genbank.cwl
@@ -0,0 +1,49 @@
+cwlVersion: v1.1
+class: CommandLineTool
+inputs:
+ importScript:
+ type: File
+ default:
+ class: File
+ location: download_genbank_data/from_genbank_to_fasta_and_yaml.py
+ inputBinding: {position: 1}
+ dict:
+ type: Directory
+ inputBinding:
+ prefix: --dict-ontology
+ position: 2
+ default:
+ class: Directory
+ location: dict_ontology_standardization
+ existing_metadata_from_nuccore:
+ type: Directory?
+ inputBinding:
+ valueFrom: "--skip-request"
+ position: 3
+outputs:
+ fasta_and_yaml:
+ type: Directory
+ outputBinding:
+ glob: fasta_and_yaml
+ metadata_from_nuccore:
+ type: Directory
+ outputBinding:
+ glob: metadata_from_nuccore
+ accessions:
+ type: File?
+ outputBinding:
+ glob: "*.acc"
+ missing_terms:
+ type: File
+ outputBinding:
+ glob: missing_terms.tsv
+requirements:
+ InitialWorkDirRequirement:
+ listing:
+ - entry: $(inputs.existing_metadata_from_nuccore)
+ entryname: metadata_from_nuccore
+ DockerRequirement:
+ dockerPull: bh20-seq-uploader/import
+ NetworkAccess:
+ networkAccess: true
+baseCommand: python3
diff --git a/scripts/import.cwl b/scripts/import.cwl
index d84516b..4b4b8ca 100644
--- a/scripts/import.cwl
+++ b/scripts/import.cwl
@@ -12,7 +12,7 @@ inputs:
type: File
default:
class: File
- location: from_genbank_to_fasta_and_yaml.py
+ location: download_genbank_data/from_genbank_to_fasta_and_yaml.py
inputBinding: {position: 2}
dict:
type: Directory
diff --git a/scripts/import_from_genbank.cwl b/scripts/import_from_genbank.cwl
new file mode 100644
index 0000000..dcf9acb
--- /dev/null
+++ b/scripts/import_from_genbank.cwl
@@ -0,0 +1,27 @@
+cwlVersion: v1.1
+class: Workflow
+inputs:
+ existing_metadata_from_nuccore:
+ type: Directory?
+outputs: []
+requirements:
+ ScatterFeatureRequirement: {}
+steps:
+ fetch_from_genbank:
+ in:
+ existing_metadata_from_nuccore: existing_metadata_from_nuccore
+ out: [fasta_and_yaml, metadata_from_nuccore, accessions]
+ run: fetch_from_genbank.cwl
+ split_into_arrays:
+ in:
+ dir: fetch_from_genbank/fasta_and_yaml
+ out: [fasta, metadata]
+ run: split_into_arrays.cwl
+ upload:
+ in:
+ fasta: split_into_arrays/fasta
+ metadata: split_into_arrays/metadata
+ out: []
+ scatter: [fasta, metadata]
+ scatterMethod: dotproduct
+ run: upload.cwl
diff --git a/scripts/import_to_arvados.py b/scripts/import_to_arvados.py
index 78cd13d..aca72d6 100644
--- a/scripts/import_to_arvados.py
+++ b/scripts/import_to_arvados.py
@@ -11,4 +11,4 @@ os.chdir("fasta_and_yaml")
fasta_files = glob.glob("*.fasta")
for f in fasta_files:
- subprocess.run(["bh20-seq-uploader", f, "%s.yaml" %f[:-6]])
+ subprocess.run(["bh20-seq-uploader", "%s.yaml" % f[:-6], f])
diff --git a/scripts/split_into_arrays.cwl b/scripts/split_into_arrays.cwl
new file mode 100644
index 0000000..102fe7d
--- /dev/null
+++ b/scripts/split_into_arrays.cwl
@@ -0,0 +1,30 @@
+cwlVersion: v1.1
+class: ExpressionTool
+requirements:
+ InlineJavascriptRequirement: {}
+inputs:
+ dir:
+ type: Directory
+ loadListing: shallow_listing
+outputs:
+ fasta: File[]
+ metadata: File[]
+expression: |
+ ${
+ var dir = inputs.dir;
+ var fasta = [];
+ var metadata = [];
+ dir.listing.sort(function(a, b) { return a.basename.localeCompare(b.basename); });
+ for (var i = 0; i < dir.listing.length; i++) {
+ if (dir.listing[i].basename.substr(-6) == ".fasta") {
+ fasta.push(dir.listing[i]);
+ }
+ if (dir.listing[i].basename.substr(-5) == ".yaml") {
+ metadata.push(dir.listing[i]);
+ }
+ }
+ if (fasta.length != metadata.length) {
+ throw "Number of .fasta and .yaml files don't match";
+ }
+ return {"fasta": fasta, "metadata": metadata};
+ }
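
Note: a Python equivalent of the ExpressionTool above, pairing each .fasta with its .yaml by sorted basename (the listing below is hypothetical):

    def split_into_arrays(listing):
        names = sorted(listing)
        fasta = [n for n in names if n.endswith(".fasta")]
        metadata = [n for n in names if n.endswith(".yaml")]
        if len(fasta) != len(metadata):
            raise ValueError("Number of .fasta and .yaml files don't match")
        return fasta, metadata

    print(split_into_arrays(["b.yaml", "a.fasta", "a.yaml", "b.fasta"]))
    # (['a.fasta', 'b.fasta'], ['a.yaml', 'b.yaml'])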
diff --git a/scripts/upload.cwl b/scripts/upload.cwl
new file mode 100644
index 0000000..0fed09a
--- /dev/null
+++ b/scripts/upload.cwl
@@ -0,0 +1,13 @@
+cwlVersion: v1.1
+class: CommandLineTool
+inputs:
+ fasta: File
+ metadata: File
+outputs: []
+requirements:
+ DockerRequirement:
+ dockerPull: bh20-seq-uploader/import
+ NetworkAccess:
+ networkAccess: true
+baseCommand: bh20-seq-uploader
+arguments: [--skip-qc, $(inputs.metadata), $(inputs.fasta)]
diff --git a/semantic_enrichment/countries.ttl b/semantic_enrichment/countries.ttl
index b56740f..3851a19 100644
--- a/semantic_enrichment/countries.ttl
+++ b/semantic_enrichment/countries.ttl
@@ -220,7 +220,6 @@
ns1:P17 <http://www.wikidata.org/entity/Q30> ;
ns1:P625 "Point(-120.0 37.0)" .
-<http://www.wikidata.org/entity/Q15> rdfs:label "Africa" .
<http://www.wikidata.org/entity/Q155> rdfs:label "Brazil" ;
ns1:P17 <http://www.wikidata.org/entity/Q155> ;
@@ -232,9 +231,6 @@
ns1:P30 <http://www.wikidata.org/entity/Q46> ;
ns1:P625 "Point(16.0 50.0)" .
-<http://www.wikidata.org/entity/Q258> rdfs:label "South Africa" ;
- ns1:P30 <http://www.wikidata.org/entity/Q15> .
-
<http://www.wikidata.org/entity/Q33> rdfs:label "Finland" ;
ns1:P17 <http://www.wikidata.org/entity/Q33> ;
ns1:P30 <http://www.wikidata.org/entity/Q46> ;
@@ -250,9 +246,6 @@
ns1:P30 <http://www.wikidata.org/entity/Q46> ;
ns1:P625 "Point(12.5 42.5)" .
-<http://www.wikidata.org/entity/Q408> rdfs:label "Australia" ;
- ns1:P30 <http://www.wikidata.org/entity/Q538> .
-
<http://www.wikidata.org/entity/Q41> rdfs:label "Greece" ;
ns1:P17 <http://www.wikidata.org/entity/Q41> ;
ns1:P30 <http://www.wikidata.org/entity/Q46> ;
@@ -269,13 +262,6 @@
<http://www.wikidata.org/entity/Q48> ;
ns1:P625 "Point(36.0 39.0)" .
-<http://www.wikidata.org/entity/Q49> rdfs:label "North America" .
-
-<http://www.wikidata.org/entity/Q668> rdfs:label "India" ;
- ns1:P30 <http://www.wikidata.org/entity/Q48> .
-
-<http://www.wikidata.org/entity/Q739> rdfs:label "Colombia" ;
- ns1:P30 <http://www.wikidata.org/entity/Q18> .
<http://www.wikidata.org/entity/Q794> rdfs:label "Iran" ;
ns1:P17 <http://www.wikidata.org/entity/Q794> ;
@@ -307,9 +293,6 @@
ns1:P30 <http://www.wikidata.org/entity/Q48> ;
ns1:P625 "Point(121.0 24.0)" .
-<http://www.wikidata.org/entity/Q881> rdfs:label "Vietnam" ;
- ns1:P30 <http://www.wikidata.org/entity/Q48> .
-
<http://www.wikidata.org/entity/Q884> rdfs:label "South Korea" ;
ns1:P17 <http://www.wikidata.org/entity/Q884> ;
ns1:P30 <http://www.wikidata.org/entity/Q48> ;
@@ -320,24 +303,62 @@
ns1:P30 <http://www.wikidata.org/entity/Q46> ;
ns1:P625 "Point(-3.0 40.0)" .
-<http://www.wikidata.org/entity/Q538> rdfs:label "Oceania" .
-
-<http://www.wikidata.org/entity/Q843> rdfs:label "Pakistan" ;
- ns1:P30 <http://www.wikidata.org/entity/Q48> .
-
-<http://www.wikidata.org/entity/Q18> rdfs:label "South America" .
+<http://www.wikidata.org/entity/Q30> rdfs:label "United States of America" ;
+ ns1:P17 <http://www.wikidata.org/entity/Q30> ;
+ ns1:P30 <http://www.wikidata.org/entity/Q49> ;
+ ns1:P625 "Point(-98.5795 39.828175)" .
-<http://www.wikidata.org/entity/Q46> rdfs:label "Europe" .
<http://www.wikidata.org/entity/Q148> rdfs:label "People's Republic of China" ;
ns1:P17 <http://www.wikidata.org/entity/Q148> ;
ns1:P30 <http://www.wikidata.org/entity/Q48> ;
ns1:P625 "Point(103.451944444 35.844722222)" .
-<http://www.wikidata.org/entity/Q48> rdfs:label "Asia" .
-
-<http://www.wikidata.org/entity/Q30> rdfs:label "United States of America" ;
+<http://www.wikidata.org/entity/Q1558> rdfs:label "Kansas" ;
ns1:P17 <http://www.wikidata.org/entity/Q30> ;
- ns1:P30 <http://www.wikidata.org/entity/Q49> ;
- ns1:P625 "Point(-98.5795 39.828175)" .
+ ns1:P625 "Point(-98.0 38.5)" .
+
+<http://www.wikidata.org/entity/Q1718> rdfs:label "Düsseldorf" ;
+ ns1:P17 <http://www.wikidata.org/entity/Q183> ;
+ ns1:P625 "Point(6.772380555 51.231144444)" .
+
+<http://www.wikidata.org/entity/Q142> rdfs:label "France" ;
+ ns1:P17 <http://www.wikidata.org/entity/Q142> ;
+ ns1:P30 <http://www.wikidata.org/entity/Q46> ;
+ ns1:P625 "Point(2.0 47.0)" .
+
+
+<http://www.wikidata.org/entity/Q79> rdfs:label "Egypt" ;
+ ns1:P17 <http://www.wikidata.org/entity/Q79> ;
+ ns1:P30 <http://www.wikidata.org/entity/Q15>,
+ <http://www.wikidata.org/entity/Q48> ;
+ ns1:P625 "Point(29.0 27.0)" .
+
+<http://www.wikidata.org/entity/Q183> rdfs:label "Germany" ;
+ ns1:P30 <http://www.wikidata.org/entity/Q46> .
+
+<http://www.wikidata.org/entity/Q843> rdfs:label "Pakistan" ;
+ ns1:P30 <http://www.wikidata.org/entity/Q48> .
+
+<http://www.wikidata.org/entity/Q881> rdfs:label "Vietnam" ;
+ ns1:P30 <http://www.wikidata.org/entity/Q48> .
+
+<http://www.wikidata.org/entity/Q668> rdfs:label "India" ;
+ ns1:P30 <http://www.wikidata.org/entity/Q48> .
+
+<http://www.wikidata.org/entity/Q739> rdfs:label "Colombia" ;
+ ns1:P30 <http://www.wikidata.org/entity/Q18> .
+
+<http://www.wikidata.org/entity/Q258> rdfs:label "South Africa" ;
+ ns1:P30 <http://www.wikidata.org/entity/Q15> .
+
+<http://www.wikidata.org/entity/Q408> rdfs:label "Australia" ;
+ ns1:P30 <http://www.wikidata.org/entity/Q538> .
+
+<http://www.wikidata.org/entity/Q538> rdfs:label "Oceania" .
+<http://www.wikidata.org/entity/Q49> rdfs:label "North America" .
+<http://www.wikidata.org/entity/Q18> rdfs:label "South America" .
+<http://www.wikidata.org/entity/Q46> rdfs:label "Europe" .
+<http://www.wikidata.org/entity/Q48> rdfs:label "Asia" .
+<http://www.wikidata.org/entity/Q15> rdfs:label "Africa" .
diff --git a/semantic_enrichment/labels.ttl b/semantic_enrichment/labels.ttl
index 85c40e8..68c21bc 100644
--- a/semantic_enrichment/labels.ttl
+++ b/semantic_enrichment/labels.ttl
@@ -33,6 +33,8 @@
<http://www.ebi.ac.uk/efo/EFO_0008632> <http://www.w3.org/2000/01/rdf-schema#label> "ONT MinION" .
<http://purl.obolibrary.org/obo/NCIT_C146818> <http://www.w3.org/2000/01/rdf-schema#label> " Oxford Nanopore Sequencing" .
<http://www.ebi.ac.uk/efo/EFO_0004205> <http://www.w3.org/2000/01/rdf-schema#label> "Illumina MiSeq" .
+<http://www.ebi.ac.uk/efo/EFO_0008567> <http://www.w3.org/2000/01/rdf-schema#label> "Illumina HiSeq X" .
+<http://www.ebi.ac.uk/efo/EFO_0008637> <http://www.w3.org/2000/01/rdf-schema#label> "Illumina NovaSeq 6000" .
<http://purl.obolibrary.org/obo/OBI_0000759> <http://www.w3.org/2000/01/rdf-schema#label> "Illumina" .
<http://purl.obolibrary.org/obo/NCIT_C125894> <http://www.w3.org/2000/01/rdf-schema#label> "Ion Semiconductor Sequencing".
<http://purl.obolibrary.org/obo/NCIT_C19641> <http://www.w3.org/2000/01/rdf-schema#label> "Dideoxy Chain Termination DNA Sequencing" .