about summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--bh20seqanalyzer/main.py14
-rw-r--r--bh20sequploader/bh20seq-options.yml9
-rw-r--r--bh20sequploader/bh20seq-schema.yml76
-rw-r--r--bh20sequploader/bh20seq-shex.rdf40
-rw-r--r--bh20sequploader/main.py8
-rw-r--r--example/metadata.yaml14
-rw-r--r--example/minimal_example.yaml6
-rw-r--r--scripts/dict_ontology_standardization/ncbi_countries.csv2
-rw-r--r--scripts/dict_ontology_standardization/ncbi_host_health_status.csv8
-rw-r--r--scripts/dict_ontology_standardization/ncbi_speciesman_source.csv5
-rwxr-xr-xscripts/foreach.sh18
-rwxr-xr-x[-rw-r--r--]scripts/from_genbank_to_fasta_and_yaml.py349
-rw-r--r--scripts/sequences.acc121
-rw-r--r--workflows/pangenome-generate/merge-metadata.cwl44
-rw-r--r--workflows/pangenome-generate/merge-metadata.py40
-rw-r--r--workflows/pangenome-generate/minimap2.cwl2
-rw-r--r--workflows/pangenome-generate/pangenome-generate.cwl15
-rw-r--r--workflows/pangenome-generate/relabel-seqs.cwl50
-rw-r--r--workflows/pangenome-generate/relabel-seqs.py30
-rw-r--r--workflows/pangenome-generate/seqkit-rmdup.cwl7
-rw-r--r--workflows/pangenome-generate/testjob.yml16
21 files changed, 637 insertions, 237 deletions
diff --git a/bh20seqanalyzer/main.py b/bh20seqanalyzer/main.py
index 193a268..8d0f562 100644
--- a/bh20seqanalyzer/main.py
+++ b/bh20seqanalyzer/main.py
@@ -214,14 +214,26 @@ def main():
     parser.add_argument('--fastq-workflow-uuid', type=str, default='lugli-7fd4e-2zp9q4jo5xpif9y', help='')
 
     parser.add_argument('--latest-result-collection', type=str, default='lugli-4zz18-z513nlpqm03hpca', help='')
+    parser.add_argument('--kickoff', action="store_true")
     args = parser.parse_args()
 
     api = arvados.api()
 
-    logging.info("Starting up, monitoring %s for uploads" % (args.uploader_project))
+
 
     schema_ref = upload_schema(api, args.workflow_def_project)
 
+    if args.kickoff:
+        logging.info("Starting a single analysis run")
+        start_pangenome_analysis(api,
+                                 args.pangenome_analysis_project,
+                                 args.pangenome_workflow_uuid,
+                                 args.validated_project,
+                                 schema_ref)
+        return
+
+    logging.info("Starting up, monitoring %s for uploads" % (args.uploader_project))
+
     while True:
         move_fastq_to_fasta_results(api, args.fastq_project, args.uploader_project)
 
diff --git a/bh20sequploader/bh20seq-options.yml b/bh20sequploader/bh20seq-options.yml
index 7320ecf..4c1d15c 100644
--- a/bh20sequploader/bh20seq-options.yml
+++ b/bh20sequploader/bh20seq-options.yml
@@ -15,6 +15,15 @@ host_sex:
   Female: http://purl.obolibrary.org/obo/PATO_0000383
   Intersex: http://purl.obolibrary.org/obo/PATO_0001340
 
+host_health_status:
+  healthy: http://purl.obolibrary.org/obo/NCIT_C115935
+  asymptomatic: http://purl.obolibrary.org/obo/NCIT_C3833
+  symptomatic: http://purl.obolibrary.org/obo/NCIT_C25269
+  admitted to hospital: http://purl.obolibrary.org/obo/GENEPIO_0002020
+  discharged from hospital: http://purl.obolibrary.org/obo/GENEPIO_0001849
+  dead: http://purl.obolibrary.org/obo/NCIT_C28554
+  alive: http://purl.obolibrary.org/obo/NCIT_C37987
+
 sample_sequencing_technology:
   Illumina NextSeq 500: http://www.ebi.ac.uk/efo/EFO_0009173
   Illumina NextSeq 550: http://www.ebi.ac.uk/efo/EFO_0008566
diff --git a/bh20sequploader/bh20seq-schema.yml b/bh20sequploader/bh20seq-schema.yml
index bbcafc8..9a89979 100644
--- a/bh20sequploader/bh20seq-schema.yml
+++ b/bh20sequploader/bh20seq-schema.yml
@@ -18,17 +18,19 @@ $graph:
         jsonldPredicate:
           _id: http://www.ebi.ac.uk/efo/EFO_0000532
           _type: "@id"
+          noLinkCheck: true
     host_id:
         doc: Identifer for the host. If you submit multiple samples from the same host, use the same host_id for those samples
-        type: string
+        type: string?
         jsonldPredicate:
           _id: http://semanticscience.org/resource/SIO_000115
     host_sex:
-        doc: Sex of the host, IRI expected
+        doc: Sex of the host as defined in PATO, expect male (http://purl.obolibrary.org/obo/PATO_0000384) or female (http://purl.obolibrary.org/obo/PATO_0000383)
         type: string?
         jsonldPredicate:
           _id: http://purl.obolibrary.org/obo/PATO_0000047
           _type: "@id"
+          noLinkCheck: true
     host_age:
         doc: Age of the host as number (e.g. 50)
         type: int?
@@ -40,10 +42,12 @@ $graph:
         jsonldPredicate:
           _id: http://purl.obolibrary.org/obo/NCIT_C42574
           _type: "@id"
+          noLinkCheck: true
     host_health_status:
-        doc: A condition or state at a particular time
+        doc: A condition or state at a particular time, must be one of the following (obo:NCIT_C115935 obo:NCIT_C3833 obo:NCIT_C25269 obo:GENEPIO_0002020 obo:GENEPIO_0001849 obo:NCIT_C28554 obo:NCIT_C37987)
         type: string?
-        jsonldPredicate: http://purl.obolibrary.org/obo/NCIT_C25688
+        jsonldPredicate:
+          _id: http://purl.obolibrary.org/obo/NCIT_C25688
     host_treatment:
       doc: Process in which the act is intended to modify or alter host status
       type: string?
@@ -63,39 +67,47 @@ $graph:
 - name: sampleSchema
   type: record
   fields:
+    sample_id:
+      doc: Id of the sample as defined by the submitter
+      type: string
+      jsonldPredicate:
+        _id: http://semanticscience.org/resource/SIO_000115
+    collection_date:
+      doc: Date when the sample was taken
+      type: string
+      jsonldPredicate:
+        _id: http://ncicb.nci.nih.gov/xml/owl/EVS/Thesaurus.owl#C25164
+    collection_location:
+      doc: Geographical location where the sample was collected as wikidata reference, e.g. http://www.wikidata.org/entity/Q148 (China)
+      type: string
+      jsonldPredicate:
+        _id: http://purl.obolibrary.org/obo/GAZ_00000448
+        _type: "@id"
+        noLinkCheck: true
     collector_name:
       doc: Name of the person that took the sample
-      type: string
+      type: string?
       jsonldPredicate:
           _id: http://purl.obolibrary.org/obo/OBI_0001895
     collecting_institution:
       doc: Institute that was responsible of sampeling
-      type: string
+      type: string?
       jsonldPredicate:
-          _id: http://semanticscience.org/resource/SIO_001167
+          _id: http://purl.obolibrary.org/obo/NCIT_C41206
     specimen_source:
       doc: Method how the specimen was derived as NCIT IRI, e.g. http://purl.obolibrary.org/obo/NCIT_C155831 (=nasopharyngeal swab)
       type: string?
       jsonldPredicate:
           _id: http://purl.obolibrary.org/obo/OBI_0001479
           _type: "@id"
+          noLinkCheck: true
     specimen_source2:
       doc: Method how the specimen was derived as NCIT IRI, e.g. http://purl.obolibrary.org/obo/NCIT_C155835 (=throat swabb)
       type: string?
       jsonldPredicate:
           _id: http://purl.obolibrary.org/obo/OBI_0001479
           _type: "@id"
-    collection_date:
-      doc: Date when the sample was taken
-      type: string
-      jsonldPredicate:
-          _id: http://ncicb.nci.nih.gov/xml/owl/EVS/Thesaurus.owl#C25164
-    collection_location:
-      doc: Geographical location where the sample was collected as wikidata reference, e.g. http://www.wikidata.org/entity/Q148 (China)
-      type: string
-      jsonldPredicate:
-        _id: http://purl.obolibrary.org/obo/GAZ_00000448
-        _type: "@id"
+          noLinkCheck: true
     sample_storage_conditions:
       doc: Information about storage of a specified type, e.g.  frozen specimen, paraffin, fresh ....
       type: string?
@@ -106,11 +118,6 @@ $graph:
       type: string?
       jsonldPredicate:
           _id: http://semanticscience.org/resource/SIO_001167
-    sample_id:
-      doc: Id of the sample as defined by the submitter
-      type: string
-      jsonldPredicate:
-          _id: http://semanticscience.org/resource/SIO_000115
     source_database_accession:
       doc: If data is deposit at a public resource (e.g. Genbank, ENA) enter the Accession Id here
       type: string?
@@ -126,6 +133,7 @@ $graph:
       jsonldPredicate:
           _id: http://edamontology.org/data_1875
           _type: "@id"
+          noLinkCheck: true
     virus_strain:
       doc: Name of the virus strain
       type: string?
@@ -137,16 +145,18 @@ $graph:
   fields:
     sample_sequencing_technology:
       doc: Technology that was used to sequence this sample (e.g Sanger, Nanopor MiniION)
-      type: string
+      type: string?
       jsonldPredicate:
         _id: http://purl.obolibrary.org/obo/OBI_0600047
         _type: "@id"
+        noLinkCheck: true
     sample_sequencing_technology2:
       doc: Technology that was used to sequence this sample (e.g Sanger, Nanopor MiniION)
       type: string?
       jsonldPredicate:
         _id: http://purl.obolibrary.org/obo/OBI_0600047
         _type: "@id"
+        noLinkCheck: true
     sequence_assembly_method:
       doc: Protocol which provides instructions on the alignment of sequencing reads to reference genome
       type: string?
@@ -158,7 +168,7 @@ $graph:
       jsonldPredicate:
         _id: http://purl.obolibrary.org/obo/FLU_0000848
     sequencing_coverage2:
-      doc: If a second sequence technology was use you can submit its coverage here
+      doc: If a second sequence technology was used you can submit its coverage here
       type: float?
       jsonldPredicate:
         _id: http://purl.obolibrary.org/obo/FLU_0000848
@@ -171,9 +181,14 @@ $graph:
 - name: submitterSchema
   type: record
   fields:
+    authors:
+      doc: Name of the author(s)
+      type: string
+      jsonldPredicate:
+          _id: http://purl.obolibrary.org/obo/NCIT_C42781
     submitter_name:
       doc: Name of the submitter
-      type: string
+      type: string?
       jsonldPredicate:
           _id: http://semanticscience.org/resource/SIO_000116
     submitter_address:
@@ -183,7 +198,7 @@ $graph:
           _id: http://semanticscience.org/resource/SIO_000172
     originating_lab:
       doc: Name of the laboratory that took the sample
-      type: string
+      type: string?
       jsonldPredicate:
           _id: http://purl.obolibrary.org/obo/NCIT_C37984
     lab_address:
@@ -199,11 +214,6 @@ $graph:
       type: string?
       jsonldPredicate:
           _id: http://www.ebi.ac.uk/efo/EFO_0001741
-    authors:
-      doc: Name of the author(s)
-      type: string?
-      jsonldPredicate:
-          _id: http://purl.obolibrary.org/obo/NCIT_C42781
     publication:
       doc: Reference to publication of this sample (e.g. DOI, pubmed ID, ...)
       type: string?
@@ -223,7 +233,7 @@ $graph:
   fields:
     host: hostSchema
     sample: sampleSchema
-    virus: virusSchema?
+    virus: virusSchema
     technology: technologySchema
     submitter: submitterSchema
     id:
diff --git a/bh20sequploader/bh20seq-shex.rdf b/bh20sequploader/bh20seq-shex.rdf
index 8d3f5fc..246fd57 100644
--- a/bh20sequploader/bh20seq-shex.rdf
+++ b/bh20sequploader/bh20seq-shex.rdf
@@ -7,6 +7,7 @@ PREFIX sio: <http://semanticscience.org/resource/>
 PREFIX efo: <http://www.ebi.ac.uk/efo/>
 PREFIX evs: <http://ncicb.nci.nih.gov/xml/owl/EVS/Thesaurus.owl#>
 PREFIX edam: <http://edamontology.org/>
+PREFIX wikidata: <http://www.wikidata.org/entity/>
 
 :submissionShape {
   MainSchema:host   @:hostShape ;
@@ -18,39 +19,44 @@ PREFIX edam: <http://edamontology.org/>
 
 :hostShape  {
   	efo:EFO_0000532 [ obo:NCBITaxon_~ ] ;
-    obo:PATO_0000047 [ obo:NCIT_C20197  obo:NCIT_C27993  obo:NCIT_C17998 ] ;
-    sio:SIO_000115 xsd:string ;
+    sio:SIO_000115 xsd:string ?;
+    obo:PATO_0000047 [ obo:PATO_0000384 obo:PATO_0000383 ] ?;
     obo:PATO_0000011 xsd:integer ?;
     obo:NCIT_C42574 [ obo:UO_~ ] ?;
-    sio:SIO_001167 xsd:string ?;
+	obo:NCIT_C25688 [obo:NCIT_C115935 obo:NCIT_C3833 obo:NCIT_C25269 obo:GENEPIO_0002020 obo:GENEPIO_0001849 obo:NCIT_C28554 obo:NCIT_C37987 ] ? ;
     efo:EFO_0000727 xsd:string ?;
+    obo:VO_0000002 xsd:string ?;
+    sio:SIO_001167 xsd:string ?;
 }
 
 :sampleShape  {
-    obo:OBI_0001895 xsd:string ;
-    sio:SIO_000115 xsd:string ;
-    sio:SIO_001167 xsd:string ;
-	evs:C25164 xsd:string ?;
-    obo:GAZ_00000448 [obo:GAZ_~] ?;
+    sio:SIO_000115 xsd:string;
+	evs:C25164 xsd:string;
+	obo:GAZ_00000448 [wikidata:~] ;
+    obo:OBI_0001895 xsd:string ?;
+    obo:NCIT_C41206 xsd:string ?;
+    obo:OBI_0001479 IRI {0,2};
     obo:OBI_0001472 xsd:string ?;
-    obo:OBI_0001479 xsd:string ?;
+    sio:SIO_001167 xsd:string ?;
 }
 
 :submitterShape {
-    sio:SIO_000116 xsd:string ;
-	obo:NCIT_C37984 xsd:string ;
-	obo:NCIT_C37900 xsd:string ?;
-    obo:NCIT_C42781 xsd:string ?;
-    obo:OBI_0600047 xsd:string ?;
-    sio:SIO_000115 /https:\u002F\u002Forcid.org\u002F.{4}-.{4}-.{4}-.{4}/?;
+    obo:NCIT_C42781 xsd:string ;
+    sio:SIO_000116 xsd:string ?;
     sio:SIO_000172 xsd:string ?;
+    obo:NCIT_C37984 xsd:string ?;
+    obo:OBI_0600047 xsd:string ?;
+  	obo:NCIT_C37900 xsd:string ?;
     efo:EFO_0001741 xsd:string ?;
+    obo:NCIT_C19026 xsd:string ?;
+    sio:SIO_000115 /https:\u002F\u002Forcid.org\u002F.{4}-.{4}-.{4}-.{4}/?;
 }
 
 :technologyShape {
-    obo:OBI_0600047 xsd:string ;
-    obo:FLU_0000848 xsd:integer ?;
+    obo:OBI_0600047 IRI {0,2} ;
     efo:EFO_0002699 xsd:string ?;
+    obo:FLU_0000848 xsd:double {0,2};
+    sio:SIO_001167 xsd:string ?;
 }
 
 :virusShape{
diff --git a/bh20sequploader/main.py b/bh20sequploader/main.py
index 49d012d..4c4711d 100644
--- a/bh20sequploader/main.py
+++ b/bh20sequploader/main.py
@@ -44,7 +44,8 @@ def main():
 
     with col.open(target, "w") as f:
         r = args.sequence.read(65536)
-        print(r[0:20])
+        seqlabel = r[1:r.index("\n")]
+        print(seqlabel)
         while r:
             f.write(r)
             r = args.sequence.read(65536)
@@ -62,13 +63,14 @@ def main():
     external_ip = urllib.request.urlopen('https://ident.me').read().decode('utf8')
 
     properties = {
+        "sequence_label": seqlabel,
         "upload_app": "bh20-seq-uploader",
         "upload_ip": external_ip,
         "upload_user": "%s@%s" % (getpass.getuser(), socket.gethostname())
     }
 
-    col.save_new(owner_uuid=UPLOAD_PROJECT, name="Uploaded by %s from %s" %
-                 (properties['upload_user'], properties['upload_ip']),
+    col.save_new(owner_uuid=UPLOAD_PROJECT, name="%s uploaded by %s from %s" %
+                 (seqlabel, properties['upload_user'], properties['upload_ip']),
                  properties=properties, ensure_unique_name=True)
 
     print("Done")
diff --git a/example/metadata.yaml b/example/metadata.yaml
index 1e83400..a76616c 100644
--- a/example/metadata.yaml
+++ b/example/metadata.yaml
@@ -6,15 +6,15 @@ host:
     host_sex: http://purl.obolibrary.org/obo/NCIT_C27993
     host_age: 20
     host_age_unit: http://purl.obolibrary.org/obo/UO_0000036
-    host_health_status: A condition or state at a particular time
-    host_treatment: Process in which the act is intended to modify or alter host status
-    host_vaccination: List of vaccines given to the host
+    host_health_status: http://purl.obolibrary.org/obo/NCIT_C25269
+    host_treatment: Process in which the act is intended to modify or alter host status (Compounds)
+    host_vaccination: List of vaccines given to the host (RRIDs?)
     additional_host_information: Field for additional host information
 
 sample:
-    sample_id: Id of the sample as defined by the submitter
+    sample_id: Id of the sample as defined by the submitter 
     collector_name: Name of the person that took the sample
-    collecting_institution: Institute that was responsible of sampeling
+    collecting_institution: Institute that was responsible of sampling  
     specimen_source: http://purl.obolibrary.org/obo/NCIT_C155831
     specimen_source2: http://purl.obolibrary.org/obo/NCIT_C155835
     collection_date: "2020-01-01"
@@ -36,8 +36,8 @@ submitter:
     submitter_name: John Doe
     submitter_address: John Doe's adress
     originating_lab: John Doe kitchen
-    lab_address: John Doe's adress
+    lab_address: John Doe's address
     provider_sample_id: HmX
     submitter_sample_id: xXx
     authors: John Doe et all
-    submitter_orcid: https://orcid.org/0000-0000-0000-0000
+    submitter_orcid: https://orcid.org/0000-0000-0000-0000
\ No newline at end of file
diff --git a/example/minimal_example.yaml b/example/minimal_example.yaml
index ed578e2..0e36a25 100644
--- a/example/minimal_example.yaml
+++ b/example/minimal_example.yaml
@@ -1,13 +1,10 @@
 id: placeholder
 
 host:
-    host_id: XX1
     host_species: http://purl.obolibrary.org/obo/NCBITaxon_9606
 
 sample:
     sample_id: XX
-    collector_name: John Doe
-    collecting_institution: Doe university
     collection_date: 2020-01
     collection_location: http://www.wikidata.org/entity/Q148
 
@@ -18,5 +15,4 @@ technology:
     sample_sequencing_technology: http://www.ebi.ac.uk/efo/EFO_0008632
 
 submitter:
-    submitter_name: John Doe
-    originating_lab: John Doe's kitchen
\ No newline at end of file
+    authors: John Doe
\ No newline at end of file
diff --git a/scripts/dict_ontology_standardization/ncbi_countries.csv b/scripts/dict_ontology_standardization/ncbi_countries.csv
index d5e2235..20e8a9b 100644
--- a/scripts/dict_ontology_standardization/ncbi_countries.csv
+++ b/scripts/dict_ontology_standardization/ncbi_countries.csv
@@ -63,6 +63,7 @@ China: Jiangxi,http://www.wikidata.org/entity/Q57052
 China: Jilin,http://www.wikidata.org/entity/Q45208
 China: Liaoning,http://www.wikidata.org/entity/Q43934
 China: Macau,http://www.wikidata.org/entity/Q14773
+China: Nanchang,http://www.wikidata.org/entity/Q171943
 China: Ningxia Hui Autonomous Region,http://www.wikidata.org/entity/Q57448
 China: Qinghai,http://www.wikidata.org/entity/Q45833
 China: Shaanxi,http://www.wikidata.org/entity/Q47974
@@ -274,6 +275,7 @@ USA: IN,http://www.wikidata.org/entity/Q1415
 USA: KS,http://www.wikidata.org/entity/Q1558
 USA: KY,http://www.wikidata.org/entity/Q1603
 USA: LA,http://www.wikidata.org/entity/Q1588
+"USA: New Orleans, LA",http://www.wikidata.org/entity/Q34404
 USA: MA,http://www.wikidata.org/entity/Q771
 USA: MD,http://www.wikidata.org/entity/Q1391
 USA: ME,http://www.wikidata.org/entity/Q724
diff --git a/scripts/dict_ontology_standardization/ncbi_host_health_status.csv b/scripts/dict_ontology_standardization/ncbi_host_health_status.csv
new file mode 100644
index 0000000..bbb9347
--- /dev/null
+++ b/scripts/dict_ontology_standardization/ncbi_host_health_status.csv
@@ -0,0 +1,8 @@
+healthy,http://purl.obolibrary.org/obo/NCIT_C115935
+asymptomatic,http://purl.obolibrary.org/obo/NCIT_C3833
+symptomatic,http://purl.obolibrary.org/obo/NCIT_C25269
+admitted to hospital,http://purl.obolibrary.org/obo/GENEPIO_0002020
+hospitalized patient,http://purl.obolibrary.org/obo/GENEPIO_0002020
+discharged from hospital,http://purl.obolibrary.org/obo/GENEPIO_0001849
+dead,http://purl.obolibrary.org/obo/NCIT_C28554
+alive,http://purl.obolibrary.org/obo/NCIT_C37987
diff --git a/scripts/dict_ontology_standardization/ncbi_speciesman_source.csv b/scripts/dict_ontology_standardization/ncbi_speciesman_source.csv
index 2905588..8278c90 100644
--- a/scripts/dict_ontology_standardization/ncbi_speciesman_source.csv
+++ b/scripts/dict_ontology_standardization/ncbi_speciesman_source.csv
@@ -1,4 +1,4 @@
-nasopharyngeal swab, http://purl.obolibrary.org/obo/NCIT_C155831
+nasopharyngeal swab,http://purl.obolibrary.org/obo/NCIT_C155831
 nasopharyngeal exudate,http://purl.obolibrary.org/obo/NCIT_C155831
 respiratory swab,http://purl.obolibrary.org/obo/NCIT_C155831
 naso-pharyngeal exudate,http://purl.obolibrary.org/obo/NCIT_C155831
@@ -22,4 +22,5 @@ bronchoalveolar lavage,http://purl.obolibrary.org/obo/NCIT_C13195
 sputum,http://purl.obolibrary.org/obo/NCIT_C13278
 aspirate,http://purl.obolibrary.org/obo/NCIT_C13347
 stool,http://purl.obolibrary.org/obo/NCIT_C13234
-serum,http://purl.obolibrary.org/obo/NCIT_C13325
\ No newline at end of file
+serum,http://purl.obolibrary.org/obo/NCIT_C13325
+saliva,http://purl.obolibrary.org/obo/NCIT_C13275
diff --git a/scripts/foreach.sh b/scripts/foreach.sh
new file mode 100755
index 0000000..ddc9387
--- /dev/null
+++ b/scripts/foreach.sh
@@ -0,0 +1,18 @@
+#!/bin/sh
+rm -rf validated fasta_and_yaml_*
+mkdir -p validated
+./from_genbank_to_fasta_and_yaml.py
+fasta_files=$(find fasta_and_yaml/ -name "*.fasta")
+for f in $fasta_files ; do
+    yaml=$(echo $f | rev | cut -c7- | rev).yaml
+    echo $f
+    echo $yaml
+    if bh20-seq-uploader --validate $f $yaml ; then
+	sz=$(stat --format=%s $f)
+	if test $sz -gt 20000 ; then
+	    mv $f $yaml validated
+	else
+	    echo "Fasta file too small"
+	fi
+    fi
+done
diff --git a/scripts/from_genbank_to_fasta_and_yaml.py b/scripts/from_genbank_to_fasta_and_yaml.py
index 0c410d7..f76cb29 100644..100755
--- a/scripts/from_genbank_to_fasta_and_yaml.py
+++ b/scripts/from_genbank_to_fasta_and_yaml.py
@@ -1,59 +1,59 @@
+#!/usr/bin/env python3
+
 from Bio import Entrez
-Entrez.email = 'insert_your_email@gmail.com'
+Entrez.email = 'another_email@gmail.com'
 
 import xml.etree.ElementTree as ET
-import yaml
+import json
 import os
 
-from datetime import date
-today = date.today().strftime("%Y%m%d")
-
-dir_metadata_today = 'metadata_from_nuccore_{}'.format(today)
-dir_fasta_and_yaml_today = 'fasta_and_yaml_{}'.format(today)
+num_ids_for_request = 100
 
+dir_metadata = 'metadata_from_nuccore'
+dir_fasta_and_yaml = 'fasta_and_yaml'
 dir_dict_ontology_standardization = 'dict_ontology_standardization/'
-
 path_ncbi_virus_accession = 'sequences.acc'
 
-# Take all the ids
-id_set = set()
+def chunks(lst, n):
+    for i in range(0, len(lst), n):
+        yield lst[i:i + n]
 
-term_list = ['SARS-CoV-2', 'SARS-CoV2', 'SARS CoV2', 'SARSCoV2', 'txid2697049[Organism]']
-for term in term_list:
-    tmp_list = Entrez.read(
-        Entrez.esearch(db='nuccore', term=term, idtype='acc', retmax='10000')
-    )['IdList']
+if not os.path.exists(dir_metadata):
+    os.makedirs(dir_metadata)
 
-    # Remove mRNAs, ncRNAs, Proteins, and predicted models (more information here: https://en.wikipedia.org/wiki/RefSeq)
-    tmp_list = [x for x in tmp_list if x[:2] not in ['NM', 'NR', 'NP', 'XM', 'XR', 'XP', 'WP']]
+    # Take all the ids
+    id_set = set()
 
-    # Remove the version in the id
-    tmp_list = [x.split('.')[0] for x in tmp_list]
-    
-    print(term, len(tmp_list))
+    term_list = ['SARS-CoV-2', 'SARS-CoV2', 'SARS CoV2', 'SARSCoV2', 'txid2697049[Organism]']
+    for term in term_list:
+        tmp_list = Entrez.read(
+            Entrez.esearch(db='nuccore', term=term, idtype='acc', retmax='10000')
+        )['IdList']
 
-    id_set.update([x.split('.')[0] for x in tmp_list])
+        # Remove mRNAs, ncRNAs, Proteins, and predicted models (more information here: https://en.wikipedia.org/wiki/RefSeq)
+        tmp_list = [x for x in tmp_list if x[:2] not in ['NM', 'NR', 'NP', 'XM', 'XR', 'XP', 'WP']]
 
-print(term_list, len(id_set))
+        # Remove the version in the id
+        tmp_list = [x.split('.')[0] for x in tmp_list]
 
-with open(path_ncbi_virus_accession) as f:
-    tmp_list = [line.strip('\n') for line in f]
+        print(term, len(tmp_list))
+        tmp_list=tmp_list
+    #    tmp_list = tmp_list[0:2] # restricting to small run
 
-print('NCBI Virus', len(tmp_list))
-id_set.update(tmp_list)
+        id_set.update([x.split('.')[0] for x in tmp_list])
 
-print(term_list + ['NCBI Virus'], len(id_set))
+    print(term_list, len(id_set))
+
+    with open(path_ncbi_virus_accession) as f:
+        tmp_list = [line.strip('\n') for line in f]
+
+    print('NCBI Virus', len(tmp_list))
+    id_set.update(tmp_list)
+
+    print(term_list + ['NCBI Virus'], len(id_set))
 
-def chunks(lst, n):
-    for i in range(0, len(lst), n):
-        yield lst[i:i + n]
-        
-num_ids_for_request = 100
-if not os.path.exists(dir_metadata_today):
-    os.makedirs(dir_metadata_today)
-    
     for i, id_x_list in enumerate(chunks(list(id_set), num_ids_for_request)):
-        path_metadata_xxx_xml = os.path.join(dir_metadata_today, 'metadata_{}.xml'.format(i))
+        path_metadata_xxx_xml = os.path.join(dir_metadata, 'metadata_{}.xml'.format(i))
         print('Requesting {} ids --> {}'.format(len(id_x_list), path_metadata_xxx_xml))
 
         with open(path_metadata_xxx_xml, 'w') as fw:
@@ -61,7 +61,7 @@ if not os.path.exists(dir_metadata_today):
                 Entrez.efetch(db='nuccore', id=id_x_list, retmode='xml').read()
             )
 
-            
+
 term_to_uri_dict = {}
 
 for path_dict_xxx_csv in [os.path.join(dir_dict_ontology_standardization, name_xxx_csv) for name_xxx_csv in os.listdir(dir_dict_ontology_standardization) if name_xxx_csv.endswith('.csv')]:
@@ -72,144 +72,181 @@ for path_dict_xxx_csv in [os.path.join(dir_dict_ontology_standardization, name_x
             if len(line.split(',')) > 2:
                 term, uri = line.strip('\n').split('",')
                 term = term.strip('"')
-            else:    
+            else:
                 term, uri = line.strip('\n').split(',')
 
             term_to_uri_dict[term] = uri
 
 species_to_taxid_dict = {
-    'Homo sapiens': 9606
+    'Homo sapiens': 'http://purl.obolibrary.org/obo/NCBITaxon_9606'
 }
 
 
-if not os.path.exists(dir_fasta_and_yaml_today):
-    os.makedirs(dir_fasta_and_yaml_today)
+if not os.path.exists(dir_fasta_and_yaml):
+    os.makedirs(dir_fasta_and_yaml)
 
-    for path_metadata_xxx_xml in [os.path.join(dir_metadata_today, name_metadata_xxx_xml) for name_metadata_xxx_xml in os.listdir(dir_metadata_today) if name_metadata_xxx_xml.endswith('.xml')]:
-        tree = ET.parse(path_metadata_xxx_xml)
-        GBSet = tree.getroot()
+missing_value_list = []
+    
+for path_metadata_xxx_xml in [os.path.join(dir_metadata, name_metadata_xxx_xml) for name_metadata_xxx_xml in os.listdir(dir_metadata) if name_metadata_xxx_xml.endswith('.xml')]:
+    tree = ET.parse(path_metadata_xxx_xml)
+    GBSet = tree.getroot()
+
+    for GBSeq in GBSet:
+        accession_version = GBSeq.find('GBSeq_accession-version').text
+
+        GBSeq_sequence = GBSeq.find('GBSeq_sequence')
+        if GBSeq_sequence is None:
+            print(accession_version, ' - sequence not found')
+            continue
+
+
+        # A general default-empty yaml could be read from the definitive one
+        info_for_yaml_dict = {
+            'id': 'placeholder',
+            'host': {},
+            'sample': {},
+            'virus': {},
+            'technology': {},
+            'submitter': {}
+        }
+
+        info_for_yaml_dict['sample']['sample_id'] = accession_version
+        info_for_yaml_dict['sample']['source_database_accession'] = accession_version
+        info_for_yaml_dict['submitter']['authors'] = ';'.join([x.text for x in GBSeq.iter('GBAuthor')])
+
+
+        GBSeq_comment = GBSeq.find('GBSeq_comment')
+        if GBSeq_comment is not None and 'Assembly-Data' in GBSeq_comment.text:
+            GBSeq_comment_text = GBSeq_comment.text.split('##Assembly-Data-START## ; ')[1].split(' ; ##Assembly-Data-END##')[0]
+
+            for info_to_check, field_in_yaml in zip(
+                ['Assembly Method', 'Coverage', 'Sequencing Technology'],
+                ['sequence_assembly_method', 'sequencing_coverage', 'sample_sequencing_technology']
+            ):
+                if info_to_check in GBSeq_comment_text:
+                    tech_info_to_parse = GBSeq_comment_text.split('{} :: '.format(info_to_check))[1].split(' ;')[0]
+
+                    if field_in_yaml == 'sequencing_coverage':
+                        # A regular expression would be better!
+                        try:
+                            info_for_yaml_dict['technology'][field_in_yaml] = float(
+                                tech_info_to_parse.strip('(average)').strip("reads/nt").replace(',', '.').strip(' xX>'))
+                        except ValueError:
+                            print(accession_version, "Couldn't make sense of Coverage '%s'" % tech_info_to_parse)
+                    elif field_in_yaml == 'sample_sequencing_technology':
+                        new_seq_tec_list = []
+                        for seq_tec in tech_info_to_parse.split(';'):
+                            seq_tec = seq_tec.strip()
+                            if seq_tec in term_to_uri_dict:
+                                seq_tec = term_to_uri_dict[seq_tec]
+                            else:
+                                #print(accession_version, 'missing sample_sequencing_technology:', seq_tec)
+                                missing_value_list.append('\t'.join([accession_version, 'sample_sequencing_technology', seq_tec]))
 
-        for GBSeq in GBSet:
-            accession_version = GBSeq.find('GBSeq_accession-version').text
+                            new_seq_tec_list.append(seq_tec)
 
-            GBSeq_sequence = GBSeq.find('GBSeq_sequence')
-            if GBSeq_sequence is None:
-                print(accession_version, ' - sequence not found')
-                continue
+                        for n, seq_tec in enumerate(new_seq_tec_list):
+                            info_for_yaml_dict['technology'][field_in_yaml + ('' if n == 0 else str(n + 1))] = seq_tec
+                    else:
+                        info_for_yaml_dict['technology'][field_in_yaml] = tech_info_to_parse
 
 
-            # A general default-empty yaml could be read from the definitive one
-            info_for_yaml_dict = {
-                'id': 'placeholder',
-                'host': {},
-                'sample': {},
-                'virus': {},
-                'technology': {},
-                'submitter': {}
-            }
-
-
-            info_for_yaml_dict['sample']['sample_id'] = accession_version
-            info_for_yaml_dict['submitter']['authors'] = ';'.join([x.text for x in GBSeq.iter('GBAuthor')])
-
-
-            GBSeq_comment = GBSeq.find('GBSeq_comment')
-            if GBSeq_comment is not None and 'Assembly-Data' in GBSeq_comment.text:
-                GBSeq_comment_text = GBSeq_comment.text.split('##Assembly-Data-START## ; ')[1].split(' ; ##Assembly-Data-END##')[0]
-
-                for info_to_check, field_in_yaml in zip(
-                    ['Assembly Method', 'Coverage', 'Sequencing Technology'],
-                    ['sequence_assembly_method', 'sequencing_coverage', 'sample_sequencing_technology']
-                ):
-                    if info_to_check in GBSeq_comment_text:
-                        tech_info_to_parse = GBSeq_comment_text.split('{} :: '.format(info_to_check))[1].split(' ;')[0]
-                        
-                        if field_in_yaml == 'sequencing_coverage':
-                            # A regular expression would be better!
-                            info_for_yaml_dict['technology'][field_in_yaml] = ';'.join(
-                                [x.strip('(average)').strip("reads/nt").replace(',', '.').strip(' xX>') for x in tech_info_to_parse.split(';')]
-                            )
-                        elif field_in_yaml == 'sample_sequencing_technology':
-                            new_seq_tec_list = []
-                            for seq_tec in tech_info_to_parse.split(';'):
-                                seq_tec = seq_tec.strip()
-                                if seq_tec in term_to_uri_dict:
-                                    seq_tec = term_to_uri_dict[seq_tec]
-                                else:
-                                    print(accession_version, 'missing technologies:', seq_tec)
- 
-                                new_seq_tec_list.append(seq_tec)
-
-                            for n, seq_tec in enumerate(new_seq_tec_list):
-                                info_for_yaml_dict['technology'][field_in_yaml + ('' if n == 0 else str(n + 1))] = seq_tec
-                        else:
-                            info_for_yaml_dict['technology'][field_in_yaml] = tech_info_to_parse
+                    #term_to_uri_dict
 
-                        
-                        #term_to_uri_dict
+        for GBFeature in GBSeq.iter('GBFeature'):
+            if GBFeature.find('GBFeature_key').text != 'source':
+                continue
 
-            for GBFeature in GBSeq.iter('GBFeature'):
-                if GBFeature.find('GBFeature_key').text != 'source':
+            for GBQualifier in GBFeature.iter('GBQualifier'):
+                GBQualifier_value = GBQualifier.find('GBQualifier_value')
+                if GBQualifier_value is None:
                     continue
+                GBQualifier_value_text = GBQualifier_value.text
 
-                for GBQualifier in GBFeature.iter('GBQualifier'):
-                    GBQualifier_value = GBQualifier.find('GBQualifier_value')
-                    if GBQualifier_value is None:
-                        continue
-                    GBQualifier_value_text = GBQualifier_value.text
-
-                    GBQualifier_name_text = GBQualifier.find('GBQualifier_name').text
+                GBQualifier_name_text = GBQualifier.find('GBQualifier_name').text
 
-                    if GBQualifier_name_text == 'host':
-                        GBQualifier_value_text_list = GBQualifier_value_text.split('; ')
+                if GBQualifier_name_text == 'host':
+                    GBQualifier_value_text_list = GBQualifier_value_text.split('; ')
 
-                        info_for_yaml_dict['host']['host_common_name'] = GBQualifier_value_text_list[0]
+                    #info_for_yaml_dict['host']['host_common_name'] = GBQualifier_value_text_list[0] # Removed
 
-                        if GBQualifier_value_text_list[0] in species_to_taxid_dict:
-                            info_for_yaml_dict['host']['host_species'] = species_to_taxid_dict[GBQualifier_value_text_list[0]]
+                    if GBQualifier_value_text_list[0] in species_to_taxid_dict:
+                        info_for_yaml_dict['host']['host_species'] = species_to_taxid_dict[GBQualifier_value_text_list[0]]
 
-                        if len(GBQualifier_value_text_list) > 1:
-                            if GBQualifier_value_text_list[1] in ['male', 'female']:
-                                info_for_yaml_dict['host']['host_sex'] = GBQualifier_value_text_list[1]
-                            else:
-                                info_for_yaml_dict['host']['host_health_status'] = GBQualifier_value_text_list[1]
-
-                            if 'age' in GBQualifier_value_text:
-                                info_for_yaml_dict['host']['host_age'] = int(GBQualifier_value_text_list[2].split('age ')[1])
-                                info_for_yaml_dict['host']['host_age_unit'] = 'year'
-                    elif GBQualifier_name_text == 'collected_by':
-                        if any([x in GBQualifier_value_text.lower() for x in ['institute', 'hospital', 'city', 'center']]):
-                            info_for_yaml_dict['sample']['collecting_institution'] = GBQualifier_value_text
+                    if len(GBQualifier_value_text_list) > 1:
+                        if GBQualifier_value_text_list[1] in ['male', 'female']:
+                            if GBQualifier_value_text_list[1]=='male':
+                                info_for_yaml_dict['host']['host_sex'] = "http://purl.obolibrary.org/obo/PATO_0000384"
+                            elif GBQualifier_value_text_list[1]=='female':
+                                info_for_yaml_dict['host']['host_sex'] = "http://purl.obolibrary.org/obo/PATO_0000383"
+                        elif GBQualifier_value_text_list[1] in term_to_uri_dict:
+                            info_for_yaml_dict['host']['host_health_status'] = term_to_uri_dict[GBQualifier_value_text_list[1]]
                         else:
-                            info_for_yaml_dict['sample']['collector_name'] = GBQualifier_value_text
-                    elif GBQualifier_name_text == 'isolation_source':
-                        if GBQualifier_value_text in term_to_uri_dict:
-                            info_for_yaml_dict['sample']['specimen_source'] = term_to_uri_dict[GBQualifier_value_text]
+                            #print(accession_version, 'missing {}:'.format(GBQualifier_name_text), GBQualifier_value_text_list[1])
+                            missing_value_list.append('\t'.join([accession_version, GBQualifier_name_text, GBQualifier_value_text_list[1]]))
+
+                        if 'age' in GBQualifier_value_text:
+                            info_for_yaml_dict['host']['host_age'] = int(GBQualifier_value_text_list[2].split('age ')[1])
+                            info_for_yaml_dict['host']['host_age_unit'] = 'year'
+                elif GBQualifier_name_text == 'collected_by':
+                    if any([x in GBQualifier_value_text.lower() for x in ['institute', 'hospital', 'city', 'center']]):
+                        info_for_yaml_dict['sample']['collecting_institution'] = GBQualifier_value_text
+                    else:
+                        info_for_yaml_dict['sample']['collector_name'] = GBQualifier_value_text
+                elif GBQualifier_name_text == 'isolation_source':
+                    if GBQualifier_value_text.upper() in term_to_uri_dict:
+                        GBQualifier_value_text = GBQualifier_value_text.upper() # For example, in case of 'usa: wa'
+                    
+                    if GBQualifier_value_text in term_to_uri_dict:
+                        info_for_yaml_dict['sample']['specimen_source'] = term_to_uri_dict[GBQualifier_value_text]
+                    else:
+                        if GBQualifier_value_text in ['NP/OP swab', 'nasopharyngeal and oropharyngeal swab', 'nasopharyngeal/oropharyngeal swab', 'np/np swab', 'np/op']:
+                            info_for_yaml_dict['sample']['specimen_source'] = term_to_uri_dict['nasopharyngeal swab']
+                            info_for_yaml_dict['sample']['specimen_source2'] = term_to_uri_dict['oropharyngeal swab']
+                        elif GBQualifier_value_text in ['nasopharyngeal swab/throat swab']:
+                            info_for_yaml_dict['sample']['specimen_source'] = term_to_uri_dict['nasopharyngeal swab']
+                            info_for_yaml_dict['sample']['specimen_source2'] = term_to_uri_dict['throat swab']
+                        elif GBQualifier_value_text in ['nasopharyngeal aspirate/throat swab']:
+                            info_for_yaml_dict['sample']['specimen_source'] = term_to_uri_dict['nasopharyngeal aspirate']
+                            info_for_yaml_dict['sample']['specimen_source2'] = term_to_uri_dict['throat swab']
                         else:
-                            if GBQualifier_value_text in ['NP/OP swab', 'nasopharyngeal and oropharyngeal swab', 'nasopharyngeal/oropharyngeal swab', 'np/np swab', 'np/op']:
-                                info_for_yaml_dict['sample']['specimen_source'] = term_to_uri_dict['nasopharyngeal swab']
-                                info_for_yaml_dict['sample']['specimen_source2'] = term_to_uri_dict['oropharyngeal swab']
-                            else:
-                                print(accession_version, 'missing specimen_source:', GBQualifier_value_text)
-                    elif GBQualifier_name_text == 'collection_date':
-                        # TO_DO: which format we will use?
-                        info_for_yaml_dict['sample']['collection_date'] = GBQualifier_value_text
-                    elif GBQualifier_name_text in ['lat_lon', 'country']:
-                        if GBQualifier_value_text in term_to_uri_dict:
-                            GBQualifier_value_text = term_to_uri_dict[GBQualifier_value_text]
-                        else:
-                            print(accession_version, 'missing {}:'.format(GBQualifier_name_text), GBQualifier_value_text)
-
-                        info_for_yaml_dict['sample']['collection_location'] = GBQualifier_value_text
-                    elif GBQualifier_name_text == 'note':
-                        info_for_yaml_dict['sample']['additional_collection_information'] = GBQualifier_value_text
-                    elif GBQualifier_name_text == 'isolate':
-                        info_for_yaml_dict['virus']['virus_strain'] = GBQualifier_value_text
-                    elif GBQualifier_name_text == 'db_xref':
-                        info_for_yaml_dict['virus']['virus_species'] = int(GBQualifier_value_text.split('taxon:')[1])
-                        
-            with open(os.path.join(dir_fasta_and_yaml_today, '{}.fasta'.format(accession_version)), 'w') as fw:
-                fw.write('>{}\n{}'.format(accession_version, GBSeq_sequence.text.upper()))
-
-            with open(os.path.join(dir_fasta_and_yaml_today, '{}.yaml'.format(accession_version)), 'w') as fw:
-                yaml.dump(info_for_yaml_dict, fw, default_flow_style=False)
+                            #print(accession_version, 'missing specimen_source:', GBQualifier_value_text)
+                            missing_value_list.append('\t'.join([accession_version, 'specimen_source', GBQualifier_value_text]))
+                elif GBQualifier_name_text == 'collection_date':
+                    # TO_DO: which format we will use?
+                    info_for_yaml_dict['sample']['collection_date'] = GBQualifier_value_text
+                elif GBQualifier_name_text in ['lat_lon', 'country']:
+                    if GBQualifier_value_text == 'Hong Kong':
+                        GBQualifier_value_text = 'China: Hong Kong'
+                 
+                    
+                    if GBQualifier_value_text in term_to_uri_dict:
+                        GBQualifier_value_text = term_to_uri_dict[GBQualifier_value_text]
+                    else:
+                        #print(accession_version, 'missing {}:'.format(GBQualifier_name_text), GBQualifier_value_text)
+                        missing_value_list.append('\t'.join([accession_version, GBQualifier_name_text, GBQualifier_value_text]))
+
+                    info_for_yaml_dict['sample']['collection_location'] = GBQualifier_value_text
+                elif GBQualifier_name_text == 'note':
+                    info_for_yaml_dict['sample']['additional_collection_information'] = GBQualifier_value_text
+                elif GBQualifier_name_text == 'isolate':
+                    info_for_yaml_dict['virus']['virus_strain'] = GBQualifier_value_text
+                elif GBQualifier_name_text == 'db_xref':
+                    info_for_yaml_dict['virus']['virus_species'] = "http://purl.obolibrary.org/obo/NCBITaxon_"+GBQualifier_value_text.split('taxon:')[1]
+
+
+        # Remove technology key if empty!
+        if (info_for_yaml_dict['technology']=={}):
+            del info_for_yaml_dict['technology']
+
+        with open(os.path.join(dir_fasta_and_yaml, '{}.fasta'.format(accession_version)), 'w') as fw:
+            fw.write('>{}\n{}'.format(accession_version, GBSeq_sequence.text.upper()))
+
+        with open(os.path.join(dir_fasta_and_yaml, '{}.yaml'.format(accession_version)), 'w') as fw:
+            json.dump(info_for_yaml_dict, fw, indent=2)
+
+        
+if len(missing_value_list) > 0:
+    with open('missing_terms.tsv', 'w') as fw:
+        fw.write('\n'.join(missing_value_list))
diff --git a/scripts/sequences.acc b/scripts/sequences.acc
index d000a76..0ad0878 100644
--- a/scripts/sequences.acc
+++ b/scripts/sequences.acc
@@ -1,4 +1,125 @@
 NC_045512
+LC542809
+MT114412
+MT114413
+MT114414
+MT114415
+MT114416
+MT114417
+MT114418
+MT114419
+MT230904
+MT358401
+MT358402
+MT358637
+MT358644
+MT358645
+MT358646
+MT358647
+MT358648
+MT358649
+MT358650
+MT358651
+MT358652
+MT358653
+MT358654
+MT358655
+MT358656
+MT358657
+MT358658
+MT358659
+MT358660
+MT358661
+MT358662
+MT358663
+MT358664
+MT358665
+MT358666
+MT358667
+MT358668
+MT358669
+MT358670
+MT358671
+MT358672
+MT358673
+MT358674
+MT358675
+MT358676
+MT358677
+MT358678
+MT358679
+MT358680
+MT358681
+MT358682
+MT358683
+MT358684
+MT358685
+MT358686
+MT358687
+MT358688
+MT358689
+MT358690
+MT358691
+MT358692
+MT358693
+MT358694
+MT358695
+MT358696
+MT358697
+MT358698
+MT358699
+MT358700
+MT358701
+MT358702
+MT358703
+MT358704
+MT358705
+MT358706
+MT358707
+MT358708
+MT358709
+MT358710
+MT358711
+MT358712
+MT358713
+MT358714
+MT358715
+MT358716
+MT358717
+MT358718
+MT358719
+MT358720
+MT358721
+MT358722
+MT358723
+MT358724
+MT358725
+MT358726
+MT358727
+MT358728
+MT358729
+MT358730
+MT358731
+MT358732
+MT358733
+MT358734
+MT358735
+MT358736
+MT358737
+MT358738
+MT358739
+MT358740
+MT358741
+MT358742
+MT358743
+MT358744
+MT358745
+MT358746
+MT358747
+MT358748
+MT359231
+MT359865
+MT359866
 MT350234
 MT350236
 MT350237
diff --git a/workflows/pangenome-generate/merge-metadata.cwl b/workflows/pangenome-generate/merge-metadata.cwl
index 9164c09..4d9c808 100644
--- a/workflows/pangenome-generate/merge-metadata.cwl
+++ b/workflows/pangenome-generate/merge-metadata.cwl
@@ -5,14 +5,48 @@ hints:
     dockerPull: commonworkflowlanguage/cwltool_module
 inputs:
   metadata: File[]
-  metadataSchema: File
   subjects: string[]
+  metadataSchema:
+    type: File
+    inputBinding: {position: 2}
+  originalLabels:
+    type: File
+    inputBinding: {position: 3}
+  dups:
+    type: File?
+    inputBinding: {position: 4}
+  script:
+    type: File
+    inputBinding: {position: 1}
+    default: {class: File, location: merge-metadata.py}
 outputs:
   merged: stdout
 stdout: mergedmetadata.ttl
 requirements:
+  InlineJavascriptRequirement: {}
   InitialWorkDirRequirement:
-    listing:
-      - entry: {$include: merge-metadata.py}
-        entryname: merge-metadata.py
-baseCommand: [python3, merge-metadata.py]
+    listing: |
+          ${
+          var i = 0;
+          var b = 1;
+          var out = [];
+          for (; i < inputs.metadata.length; ) { // i is advanced by the inner loop; an i++ here would skip one item per 150-element block
+            var block = [];
+            var sub = [];
+            for (; i < (b*150) && i < inputs.metadata.length; i++) {
+              block.push(inputs.metadata[i]);
+              sub.push(inputs.subjects[i]);
+            }
+            out.push({
+              entryname: "block"+b,
+              entry: JSON.stringify(block)
+            });
+            out.push({
+              entryname: "subs"+b,
+              entry: JSON.stringify(sub)
+            });
+            b++;
+          }
+          return out;
+          }
+baseCommand: python
diff --git a/workflows/pangenome-generate/merge-metadata.py b/workflows/pangenome-generate/merge-metadata.py
index 64275b1..65d08a6 100644
--- a/workflows/pangenome-generate/merge-metadata.py
+++ b/workflows/pangenome-generate/merge-metadata.py
@@ -1,9 +1,28 @@
+import re
 import schema_salad.schema
 import schema_salad.jsonld_context
+import json
+import sys
+import os
+import logging
 
-metadataSchema = '$(inputs.metadataSchema.path)'
-metadata = $(inputs.metadata)
-subjects = $(inputs.subjects)
+metadataSchema = sys.argv[1]
+originalLabels = sys.argv[2]
+dups = None
+if len(sys.argv) == 4:
+    dups = sys.argv[3]
+
+def readitems(stem):
+    items = []
+    b = 1
+    while os.path.exists("%s%i" % (stem, b)):
+        with open("%s%i" % (stem, b)) as f:
+            items.extend(json.load(f))
+        b += 1
+    return items
+
+metadata = readitems("block")
+subjects = readitems("subs")
 
 (document_loader,
  avsc_names,
@@ -11,7 +30,20 @@ subjects = $(inputs.subjects)
  metaschema_loader) = schema_salad.schema.load_schema(metadataSchema)
 
 for i, m in enumerate(metadata):
-    doc, metadata = schema_salad.schema.load_and_validate(document_loader, avsc_names, m["path"], True)
+    doc, metadata = schema_salad.schema.load_and_validate(document_loader, avsc_names, m["path"], False, False)
     doc["id"] = subjects[i]
     g = schema_salad.jsonld_context.makerdf(subjects[i], doc, document_loader.ctx)
     print(g.serialize(format="ntriples").decode("utf-8"))
+
+if dups:
+    sameseqs = open(dups, "rt")
+    for d in sameseqs:
+        logging.warning(d)
+        g = re.match(r"\d+\t(.*)", d)
+        logging.warning("%s", g.group(1))
+        sp = g.group(1).split(",")
+        for n in sp[1:]:
+            print("<%s> <http://biohackathon.org/bh20-seq-schema/has_duplicate_sequence> <%s> ." % (n.strip(), sp[0].strip()))
+
+orig = open(originalLabels, "rt")
+print(orig.read())
diff --git a/workflows/pangenome-generate/minimap2.cwl b/workflows/pangenome-generate/minimap2.cwl
index bf19ef7..42d1dce 100644
--- a/workflows/pangenome-generate/minimap2.cwl
+++ b/workflows/pangenome-generate/minimap2.cwl
@@ -12,7 +12,7 @@ hints:
   ResourceRequirement:
     coresMin: 8
     coresMax: 32
-    ramMin: $(7 * 1024)
+    ramMin: $(9 * 1024)
     outdirMin: $(Math.ceil(inputs.readsFA.size/(1024*1024*1024) + 20))
 stdout: $(inputs.readsFA.nameroot).paf
 baseCommand: minimap2
diff --git a/workflows/pangenome-generate/pangenome-generate.cwl b/workflows/pangenome-generate/pangenome-generate.cwl
index 2710743..6794e2d 100644
--- a/workflows/pangenome-generate/pangenome-generate.cwl
+++ b/workflows/pangenome-generate/pangenome-generate.cwl
@@ -18,13 +18,22 @@ outputs:
   odgiRDF:
     type: File
     outputSource: odgi2rdf/rdf
+  readsMergeDedup:
+    type: File
+    outputSource: dedup/readsMergeDedup
   mergedMetadata:
     type: File
     outputSource: mergeMetadata/merged
 steps:
+  relabel:
+    in:
+      readsFA: inputReads
+      subjects: subjects
+    out: [relabeledSeqs, originalLabels]
+    run: relabel-seqs.cwl
   dedup:
-    in: {readsFA: inputReads}
-    out: [readsMergeDedup]
+    in: {readsFA: relabel/relabeledSeqs}
+    out: [readsMergeDedup, dups]
     run: seqkit-rmdup.cwl
   overlapReads:
     in: {readsFA: dedup/readsMergeDedup}
@@ -53,5 +62,7 @@ steps:
       metadata: metadata
       metadataSchema: metadataSchema
       subjects: subjects
+      dups: dedup/dups
+      originalLabels: relabel/originalLabels
     out: [merged]
     run: merge-metadata.cwl
diff --git a/workflows/pangenome-generate/relabel-seqs.cwl b/workflows/pangenome-generate/relabel-seqs.cwl
new file mode 100644
index 0000000..c1f17a4
--- /dev/null
+++ b/workflows/pangenome-generate/relabel-seqs.cwl
@@ -0,0 +1,50 @@
+cwlVersion: v1.1
+class: CommandLineTool
+inputs:
+  readsFA: File[]
+  subjects: string[]
+  script:
+    type: File
+    default: {class: File, location: relabel-seqs.py}
+    inputBinding: {}
+outputs:
+  relabeledSeqs:
+    type: File
+    outputBinding:
+      glob: relabeledSeqs.fasta
+  originalLabels:
+    type: File
+    outputBinding:
+      glob: originalLabels.ttl
+requirements:
+  InlineJavascriptRequirement: {}
+  InitialWorkDirRequirement:
+    listing: |
+          ${
+          var i = 0;
+          var b = 1;
+          var out = [];
+          for (; i < inputs.readsFA.length; ) { // i is advanced by the inner loop; an i++ here would skip one item per 150-element block
+            var block = [];
+            var sub = [];
+            for (; i < (b*150) && i < inputs.readsFA.length; i++) {
+              block.push(inputs.readsFA[i]);
+              sub.push(inputs.subjects[i]);
+            }
+            out.push({
+              entryname: "block"+b,
+              entry: JSON.stringify(block)
+            });
+            out.push({
+              entryname: "subs"+b,
+              entry: JSON.stringify(sub)
+            });
+            b++;
+          }
+          return out;
+          }
+hints:
+  DockerRequirement:
+    dockerPull: commonworkflowlanguage/cwltool_module
+baseCommand: [python]
diff --git a/workflows/pangenome-generate/relabel-seqs.py b/workflows/pangenome-generate/relabel-seqs.py
new file mode 100644
index 0000000..6b022a0
--- /dev/null
+++ b/workflows/pangenome-generate/relabel-seqs.py
@@ -0,0 +1,30 @@
+import os
+import json
+
+def readitems(stem):
+    items = []
+    b = 1
+    while os.path.exists("%s%i" % (stem, b)):
+        with open("%s%i" % (stem, b)) as f:
+            items.extend(json.load(f))
+        b += 1
+    return items
+
+reads = readitems("block")
+subjects = readitems("subs")
+
+relabeled_fasta = open("relabeledSeqs.fasta", "wt")
+original_labels = open("originalLabels.ttl", "wt")
+
+for i, r in enumerate(reads):
+    with open(r["path"], "rt") as fa:
+        label = fa.readline()
+        original_labels.write("<%s> <http://biohackathon.org/bh20-seq-schema/original_fasta_label> \"%s\" .\n" % (subjects[i], label[1:].strip().replace('"', '\\"')))
+        relabeled_fasta.write(">"+subjects[i]+"\n")
+        endswithnewline = True
+        data = fa.read(8096)
+        while data:
+            relabeled_fasta.write(data)
+            endswithnewline = data.endswith("\n")
+            data = fa.read(8096)
+        if not endswithnewline:
+            relabeled_fasta.write("\n")
diff --git a/workflows/pangenome-generate/seqkit-rmdup.cwl b/workflows/pangenome-generate/seqkit-rmdup.cwl
index d3626f5..071fa66 100644
--- a/workflows/pangenome-generate/seqkit-rmdup.cwl
+++ b/workflows/pangenome-generate/seqkit-rmdup.cwl
@@ -1,12 +1,16 @@
 cwlVersion: v1.1
 class: CommandLineTool
 inputs:
-  readsFA: File[]
+  readsFA: File
 outputs:
   readsMergeDedup:
     type: File
     outputBinding:
       glob: readsMergeDedup.fasta
+  dups:
+    type: File?
+    outputBinding:
+      glob: dups.txt
 requirements:
   InlineJavascriptRequirement: {}
 hints:
@@ -28,5 +32,6 @@ baseCommand: seqkit
 arguments: [rmdup,
             --by-seq,
             --ignore-case,
+            --dup-num-file, dups.txt,
             -o, readsMergeDedup.fasta,
             $(inputs.readsFA)]
diff --git a/workflows/pangenome-generate/testjob.yml b/workflows/pangenome-generate/testjob.yml
new file mode 100644
index 0000000..a48aff8
--- /dev/null
+++ b/workflows/pangenome-generate/testjob.yml
@@ -0,0 +1,16 @@
+inputReads:
+  - class: File
+    location: ../../example/sequence.fasta
+  - class: File
+    location: ../../example/sequence.fasta
+metadata:
+  - class: File
+    location: ../../example/metadata.yaml
+  - class: File
+    location: ../../example/metadata.yaml
+metadataSchema:
+  class: File
+  location: ../../bh20sequploader/bh20seq-schema.yml
+subjects:
+  - http://arvados.org/keep/seq1
+  - http://arvados.org/keep/seq2