diff options
-rw-r--r-- | bh20sequploader/bh20seq-options.yml | 17 | ||||
-rw-r--r-- | bh20sequploader/bh20seq-schema.yml | 2 | ||||
-rw-r--r-- | bh20simplewebuploader/main.py | 86 | ||||
-rw-r--r-- | bh20simplewebuploader/templates/form.html | 19 | ||||
-rw-r--r-- | scripts/dict_ontology_standardization/ncbi_countries.csv | 243 | ||||
-rw-r--r-- | scripts/dict_ontology_standardization/ncbi_sequencing_technology.csv | 15 | ||||
-rw-r--r-- | scripts/dict_ontology_standardization/ncbi_speciesman_source.csv | 22 | ||||
-rw-r--r-- | scripts/from_genbank_to_fasta_and_yaml.py | 270 | ||||
-rw-r--r-- | setup.py | 2 |
9 files changed, 553 insertions, 123 deletions
diff --git a/bh20sequploader/bh20seq-options.yml b/bh20sequploader/bh20seq-options.yml new file mode 100644 index 0000000..d05be5a --- /dev/null +++ b/bh20sequploader/bh20seq-options.yml @@ -0,0 +1,17 @@ +# Contains suggested human-readable field values and their corresponding IRIs. +# Keyed on the field names in the types in the schema. Relies on field names +# being unique or at least using the same options in different containing +# types. + +host_age_unit: + Years: http://purl.obolibrary.org/obo/UO_0000036 + Months: http://purl.obolibrary.org/obo/UO_0000035 + Weeks: http://purl.obolibrary.org/obo/UO_0000034 + Days: http://purl.obolibrary.org/obo/UO_0000033 + Hours: http://purl.obolibrary.org/obo/UO_0000032 + +host_sex: + Male: http://purl.obolibrary.org/obo/NCIT_C20197 + Female: http://purl.obolibrary.org/obo/NCIT_C27993 + Intersex: http://purl.obolibrary.org/obo/NCIT_C45908 + Unknown: http://purl.obolibrary.org/obo/NCIT_C17998 diff --git a/bh20sequploader/bh20seq-schema.yml b/bh20sequploader/bh20seq-schema.yml index 7ffc15b..4cd0865 100644 --- a/bh20sequploader/bh20seq-schema.yml +++ b/bh20sequploader/bh20seq-schema.yml @@ -30,7 +30,7 @@ $graph: # jsonldPredicate: # _id: http://purl.obolibrary.org/obo/NOMEN_0000037 host_sex: - doc: Sex of the host as define in NCIT, IRI expected (http://purl.obolibrary.org/obo/C20197 (Male), http://purl.obolibrary.org/obo/NCIT_C27993 (Female) or unkown (http://purl.obolibrary.org/obo/NCIT_C17998)) + doc: Sex of the host as defined in NCIT, IRI expected (http://purl.obolibrary.org/obo/NCIT_C20197 (Male), http://purl.obolibrary.org/obo/NCIT_C27993 (Female), http://purl.obolibrary.org/obo/NCIT_C45908 (Intersex), or http://purl.obolibrary.org/obo/NCIT_C17998 (Unknown)) type: string jsonldPredicate: _id: http://purl.obolibrary.org/obo/PATO_0000047 diff --git a/bh20simplewebuploader/main.py b/bh20simplewebuploader/main.py index f5324a5..8c5c18c 100644 --- a/bh20simplewebuploader/main.py +++ b/bh20simplewebuploader/main.py @@ -7,7 
+7,7 @@ import sys import re import string import yaml -import urllib.request +import pkg_resources from flask import Flask, request, redirect, send_file, send_from_directory, render_template import os.path @@ -25,7 +25,7 @@ app.config['MAX_CONTENT_LENGTH'] = 50 * 1024 * 1024 @app.errorhandler(413) def handle_large_file(e): return (render_template('error.html', - error_message="One of your files is too large. The maximum file size is 1 megabyte."), 413) + error_message="One of your files is too large. The maximum file size is 50 megabytes."), 413) def type_to_heading(type_name): @@ -49,12 +49,32 @@ def name_to_label(field_name): return string.capwords(field_name.replace('_', ' ')) -def generate_form(schema): +def is_iri(string): """ - Linearize the schema and send a bunch of dicts. + Return True if the given string looks like an IRI, and False otherwise. + + Used for finding type IRIs in the schema. + + Right now only supports http(s) URLs because that's all we have in our schema. + """ + + return string.startswith('http') + +def generate_form(schema, options): + """ + Linearize the schema into a list of dicts. + Each dict either has a 'heading' (in which case we put a heading for a form section in the template) or an 'id', 'label', 'type', and 'required' - (in which case we make a form field in the template). + (in which case we make a form field in the template). Non-heading dicts + with type 'select' will have an 'options' field, with a list of (name, + value) tuples, and represent a form dropdown element. Non-heading dicts may + have a human-readable 'docstring' field describing them. + + Takes the deserialized metadata schema YAML, and also a deserialized YAML + of option values. The option values are keyed on (unscoped) field name in + the schema, and each is a dict of human readable option -> corresponding + IRI. 
""" # Get the list of form components, one of which is the root @@ -90,16 +110,35 @@ def generate_form(schema): for field_name, field_type in by_name.get(type_name, {}).get('fields', {}).items(): # For each field - ref_url = None + ref_iri = None + docstring = None if not isinstance(field_type, str): # If the type isn't a string + + # It may have documentation + docstring = field_type.get('doc', None) + # See if it has a more info/what goes here URL predicate = field_type.get('jsonldPredicate', {}) - if not isinstance(predicate, str): - ref_url = predicate.get('_id', None) + # Predicate may be a URL, a dict with a URL in _id, maybe a + # dict with a URL in _type, or a dict with _id and _type but no + # URLs anywhere. Some of these may not technically be allowed + # by the format, but if they occur, we might as well try to + # handle them. + if isinstance(predicate, str): + if is_iri(predicate): + ref_iri = predicate else: - ref_url = predicate # not sure this is correct - # Grab out its type field + # Assume it's a dict. Look at the fields we know about. + for field in ['_id', 'type']: + field_value = predicate.get(field, None) + if isinstance(field_value, str) and is_iri(field_value) and ref_iri is None: + # Take the first URL-looking thing we find + ref_iri = field_value + break + + + # Now overwrite the field type with the actual type string field_type = field_type.get('type', '') # Decide if the field is optional (type ends in ?) @@ -115,14 +154,26 @@ def generate_form(schema): for item in walk_fields(field_type, parent_keys + [field_name], subtree_optional or optional): yield item else: - # We know how to make a string input + # This is a leaf field. We need an input for it. 
record = {} record['id'] = '.'.join(parent_keys + [field_name]) record['label'] = name_to_label(field_name) record['required'] = not optional and not subtree_optional - if ref_url: - record['ref_url'] = ref_url - if field_type == 'string': + if ref_iri: + record['ref_iri'] = ref_iri + if docstring: + record['docstring'] = docstring + + if field_name in options: + # The field will be a 'select' type no matter what its real + # data type is. + record['type'] = 'select' # Not a real HTML input type. It's its own tag. + # We have a set of values to present + record['options'] = [] + for name, value in options[field_name].items(): + # Make a tuple for each one + record['options'].append((name, value)) + elif field_type == 'string': record['type'] = 'text' # HTML input type elif field_type == 'int': record['type'] = 'number' @@ -133,9 +184,10 @@ def generate_form(schema): return list(walk_fields(root_name)) -# At startup, we need to load the current metadata schema so we can make a form for it -METADATA_SCHEMA = yaml.safe_load(urllib.request.urlopen('https://raw.githubusercontent.com/arvados/bh20-seq-resource/master/bh20sequploader/bh20seq-schema.yml')) -FORM_ITEMS = generate_form(METADATA_SCHEMA) +# At startup, we need to load the metadata schema from the uploader module, so we can make a form for it +METADATA_SCHEMA = yaml.safe_load(pkg_resources.resource_stream("bh20sequploader", "bh20seq-schema.yml")) +METADATA_OPTION_DEFINITIONS = yaml.safe_load(pkg_resources.resource_stream("bh20sequploader", "bh20seq-options.yml")) +FORM_ITEMS = generate_form(METADATA_SCHEMA, METADATA_OPTION_DEFINITIONS) @app.route('/') def send_form(): diff --git a/bh20simplewebuploader/templates/form.html b/bh20simplewebuploader/templates/form.html index df66e8c..6993cf5 100644 --- a/bh20simplewebuploader/templates/form.html +++ b/bh20simplewebuploader/templates/form.html @@ -211,7 +211,6 @@ <div id="metadata_fill_form_spot"> <div id="metadata_fill_form"> - {{ record }} {% for record in fields 
%} {% if 'heading' in record %} @@ -221,15 +220,27 @@ <div class="record"> <h4>{{ record['heading'] }}</h4> {% else %} - <label for="{{ record['id'] }}"> + <label for="{{ record['id'] }}" title="{{ record.get('docstring', '') }}"> {{ record['label'] }} {{ "*" if record['required'] else "" }} - {% if 'ref_url' in record %} - <a href="{{ record['ref_url'] }}" title="More Info" target="_blank">?</a> + {% if 'docstring' in record %} + <a href='javascript:alert({{ record['docstring'] | tojson }})'>❓</a> + {% endif %} + {% if 'ref_iri' in record %} + <a href="{{ record['ref_iri'] }}" target="_blank" title="Ontology Link">🔗</a> {% endif %} </label> + {% if record['type'] == 'select' %} + <select id="{{ record['id'] }}" name="{{ record['id'] }}" {{ "required" if record['required'] else "" }}> + <option value="" selected>Choose one...</option> + {% for option in record['options'] %} + <option value="{{ option[1] }}">{{ option[0] }}</option> + {% endfor %} + </select> + {% else %} <input type="{{ record['type'] }}" id="{{ record['id'] }}" name="{{ record['id'] }}" {{ "required" if record['required'] else "" }}> {% endif %} + {% endif %} {% if loop.index == loop.length %} </div> {% endif %} diff --git a/scripts/dict_ontology_standardization/ncbi_countries.csv b/scripts/dict_ontology_standardization/ncbi_countries.csv new file mode 100644 index 0000000..9813f52 --- /dev/null +++ b/scripts/dict_ontology_standardization/ncbi_countries.csv @@ -0,0 +1,243 @@ +USA,http://www.wikidata.org/entity/Q30 +USA: CA,http://www.wikidata.org/entity/Q99 +USA: ME,http://www.wikidata.org/entity/Q724 +USA: NH,http://www.wikidata.org/entity/Q759 +USA: AL,http://www.wikidata.org/entity/Q173 +USA: MA,http://www.wikidata.org/entity/Q771 +USA: CT,http://www.wikidata.org/entity/Q779 +USA: AK,http://www.wikidata.org/entity/Q797 +USA: HI,http://www.wikidata.org/entity/Q782 +USA: FL,http://www.wikidata.org/entity/Q812 +USA: AZ,http://www.wikidata.org/entity/Q816 +USA: 
OR,http://www.wikidata.org/entity/Q824 +USA: UT,http://www.wikidata.org/entity/Q829 +USA: MI,http://www.wikidata.org/entity/Q1166 +USA: IL,http://www.wikidata.org/entity/Q1204 +USA: ND,http://www.wikidata.org/entity/Q1207 +USA: SD,http://www.wikidata.org/entity/Q1211 +USA: ID,http://www.wikidata.org/entity/Q1221 +USA: MT,http://www.wikidata.org/entity/Q1212 +USA: WY,http://www.wikidata.org/entity/Q1214 +USA: WA,http://www.wikidata.org/entity/Q1223 +USA: NV,http://www.wikidata.org/entity/Q1227 +USA: CO,http://www.wikidata.org/entity/Q1261 +USA: WV,http://www.wikidata.org/entity/Q1371 +USA: VA,http://www.wikidata.org/entity/Q1370 +USA: RI,http://www.wikidata.org/entity/Q1387 +USA: NY,http://www.wikidata.org/entity/Q1384 +USA: DE,http://www.wikidata.org/entity/Q1393 +USA: OH,http://www.wikidata.org/entity/Q1397 +USA: MD,http://www.wikidata.org/entity/Q1391 +USA: NJ,http://www.wikidata.org/entity/Q1408 +USA: GA,http://www.wikidata.org/entity/Q1428 +USA: PA,http://www.wikidata.org/entity/Q1400 +USA: IN,http://www.wikidata.org/entity/Q1415 +USA: TX,http://www.wikidata.org/entity/Q1439 +USA: NC,http://www.wikidata.org/entity/Q1454 +USA: MS,http://www.wikidata.org/entity/Q1494 +USA: SC,http://www.wikidata.org/entity/Q1456 +USA: NM,http://www.wikidata.org/entity/Q1522 +USA: TN,http://www.wikidata.org/entity/Q1509 +USA: MN,http://www.wikidata.org/entity/Q1527 +USA: WI,http://www.wikidata.org/entity/Q1537 +USA: NE,http://www.wikidata.org/entity/Q1553 +USA: IA,http://www.wikidata.org/entity/Q1546 +USA: OK,http://www.wikidata.org/entity/Q1649 +USA: KS,http://www.wikidata.org/entity/Q1558 +USA: VT,http://www.wikidata.org/entity/Q16551 +USA: MO,http://www.wikidata.org/entity/Q1581 +USA: LA,http://www.wikidata.org/entity/Q1588 +USA: KY,http://www.wikidata.org/entity/Q1603 +USA: AR,http://www.wikidata.org/entity/Q1612 +China,http://www.wikidata.org/entity/Q148 +China: Beijing,http://www.wikidata.org/entity/Q956 +China: Hong Kong,http://www.wikidata.org/entity/Q8646 +China: 
Shanghai,http://www.wikidata.org/entity/Q8686 +China: Chongqing,http://www.wikidata.org/entity/Q11725 +China: Tianjin,http://www.wikidata.org/entity/Q11736 +China: Macau,http://www.wikidata.org/entity/Q14773 +China: Guangdong,http://www.wikidata.org/entity/Q15175 +China: Guangxi Zhuang Autonomous Region,http://www.wikidata.org/entity/Q15176 +China: Jiangsu,http://www.wikidata.org/entity/Q16963 +China: Zhejiang,http://www.wikidata.org/entity/Q16967 +China: Tibet Autonomous Region,http://www.wikidata.org/entity/Q17269 +China: Heilongjiang,http://www.wikidata.org/entity/Q19206 +China: Sichuan,http://www.wikidata.org/entity/Q19770 +China: Hebei,http://www.wikidata.org/entity/Q21208 +China: Xinjiang,http://www.wikidata.org/entity/Q34800 +China: Anhui,http://www.wikidata.org/entity/Q40956 +China: Inner Mongolia,http://www.wikidata.org/entity/Q41079 +China: Fujian,http://www.wikidata.org/entity/Q41705 +China: Hainan,http://www.wikidata.org/entity/Q42200 +China: Gansu,http://www.wikidata.org/entity/Q42392 +China: Yunnan,http://www.wikidata.org/entity/Q43194 +China: Shandong,http://www.wikidata.org/entity/Q43407 +China: Henan,http://www.wikidata.org/entity/Q43684 +China: Liaoning,http://www.wikidata.org/entity/Q43934 +China: Jilin,http://www.wikidata.org/entity/Q45208 +China: Hunan,http://www.wikidata.org/entity/Q45761 +China: Hubei,http://www.wikidata.org/entity/Q46862 +China: Qinghai,http://www.wikidata.org/entity/Q45833 +China: Shanxi,http://www.wikidata.org/entity/Q46913 +China: Shaanxi,http://www.wikidata.org/entity/Q47974 +China: Guizhou,http://www.wikidata.org/entity/Q47097 +China: Jiangxi,http://www.wikidata.org/entity/Q57052 +China: Ningxia Hui Autonomous Region,http://www.wikidata.org/entity/Q57448 +30.59 N 114.3 E,http://www.wikidata.org/entity/Q11746 +Sri Lanka,http://www.wikidata.org/entity/Q854 +Syria,http://www.wikidata.org/entity/Q858 +Tajikistan,http://www.wikidata.org/entity/Q863 +Canada,http://www.wikidata.org/entity/Q16 
+Thailand,http://www.wikidata.org/entity/Q869 +Japan,http://www.wikidata.org/entity/Q17 +Turkmenistan,http://www.wikidata.org/entity/Q874 +Norway,http://www.wikidata.org/entity/Q20 +Ireland,http://www.wikidata.org/entity/Q27 +United Arab Emirates,http://www.wikidata.org/entity/Q878 +Vietnam,http://www.wikidata.org/entity/Q881 +United States of America,http://www.wikidata.org/entity/Q30 +South Korea,http://www.wikidata.org/entity/Q884 +Denmark,http://www.wikidata.org/entity/Q35 +Afghanistan,http://www.wikidata.org/entity/Q889 +Bangladesh,http://www.wikidata.org/entity/Q902 +Poland,http://www.wikidata.org/entity/Q36 +Mali,http://www.wikidata.org/entity/Q912 +Italy,http://www.wikidata.org/entity/Q38 +Angola,http://www.wikidata.org/entity/Q916 +Switzerland,http://www.wikidata.org/entity/Q39 +Bhutan,http://www.wikidata.org/entity/Q917 +Austria,http://www.wikidata.org/entity/Q40 +Brunei,http://www.wikidata.org/entity/Q921 +Tanzania,http://www.wikidata.org/entity/Q924 +Turkey,http://www.wikidata.org/entity/Q43 +Philippines,http://www.wikidata.org/entity/Q928 +Portugal,http://www.wikidata.org/entity/Q45 +Uruguay,http://www.wikidata.org/entity/Q77 +Central African Republic,http://www.wikidata.org/entity/Q929 +Togo,http://www.wikidata.org/entity/Q945 +Egypt,http://www.wikidata.org/entity/Q79 +Tunisia,http://www.wikidata.org/entity/Q948 +Zambia,http://www.wikidata.org/entity/Q953 +Mexico,http://www.wikidata.org/entity/Q96 +Zimbabwe,http://www.wikidata.org/entity/Q954 +South Sudan,http://www.wikidata.org/entity/Q958 +Kenya,http://www.wikidata.org/entity/Q114 +Benin,http://www.wikidata.org/entity/Q962 +Ethiopia,http://www.wikidata.org/entity/Q115 +Botswana,http://www.wikidata.org/entity/Q963 +Ghana,http://www.wikidata.org/entity/Q117 +Burkina Faso,http://www.wikidata.org/entity/Q965 +Burundi,http://www.wikidata.org/entity/Q967 +France,http://www.wikidata.org/entity/Q142 +Comoros,http://www.wikidata.org/entity/Q970 +United Kingdom,http://www.wikidata.org/entity/Q145 +Republic of 
the Congo,http://www.wikidata.org/entity/Q971 +People's Republic of China,http://www.wikidata.org/entity/Q148 +Democratic Republic of the Congo,http://www.wikidata.org/entity/Q974 +Brazil,http://www.wikidata.org/entity/Q155 +Djibouti,http://www.wikidata.org/entity/Q977 +Germany,http://www.wikidata.org/entity/Q183 +Eritrea,http://www.wikidata.org/entity/Q986 +The Gambia,http://www.wikidata.org/entity/Q1005 +Latvia,http://www.wikidata.org/entity/Q211 +Czech Republic,http://www.wikidata.org/entity/Q213 +Guinea,http://www.wikidata.org/entity/Q1006 +Guinea-Bissau,http://www.wikidata.org/entity/Q1007 +Slovakia,http://www.wikidata.org/entity/Q214 +Ivory Coast,http://www.wikidata.org/entity/Q1008 +Romania,http://www.wikidata.org/entity/Q218 +Cape Verde,http://www.wikidata.org/entity/Q1011 +Cameroon,http://www.wikidata.org/entity/Q1009 +Bulgaria,http://www.wikidata.org/entity/Q219 +Lesotho,http://www.wikidata.org/entity/Q1013 +Croatia,http://www.wikidata.org/entity/Q224 +Liberia,http://www.wikidata.org/entity/Q1014 +Libya,http://www.wikidata.org/entity/Q1016 +Kazakhstan,http://www.wikidata.org/entity/Q232 +Montenegro,http://www.wikidata.org/entity/Q236 +Madagascar,http://www.wikidata.org/entity/Q1019 +Barbados,http://www.wikidata.org/entity/Q244 +Indonesia,http://www.wikidata.org/entity/Q252 +Malawi,http://www.wikidata.org/entity/Q1020 +Mauritania,http://www.wikidata.org/entity/Q1025 +South Africa,http://www.wikidata.org/entity/Q258 +Mauritius,http://www.wikidata.org/entity/Q1027 +Algeria,http://www.wikidata.org/entity/Q262 +Morocco,http://www.wikidata.org/entity/Q1028 +Mozambique,http://www.wikidata.org/entity/Q1029 +Uzbekistan,http://www.wikidata.org/entity/Q265 +Namibia,http://www.wikidata.org/entity/Q1030 +Chile,http://www.wikidata.org/entity/Q298 +Niger,http://www.wikidata.org/entity/Q1032 +Singapore,http://www.wikidata.org/entity/Q334 +Nigeria,http://www.wikidata.org/entity/Q1033 +Bahrain,http://www.wikidata.org/entity/Q398 +Uganda,http://www.wikidata.org/entity/Q1036 
+Australia,http://www.wikidata.org/entity/Q408 +Rwanda,http://www.wikidata.org/entity/Q1037 +Argentina,http://www.wikidata.org/entity/Q414 +São Tomé and Príncipe,http://www.wikidata.org/entity/Q1039 +Peru,http://www.wikidata.org/entity/Q419 +Senegal,http://www.wikidata.org/entity/Q1041 +Seychelles,http://www.wikidata.org/entity/Q1042 +North Korea,http://www.wikidata.org/entity/Q423 +Sierra Leone,http://www.wikidata.org/entity/Q1044 +Cambodia,http://www.wikidata.org/entity/Q424 +Sudan,http://www.wikidata.org/entity/Q1049 +Somalia,http://www.wikidata.org/entity/Q1045 +Eswatini,http://www.wikidata.org/entity/Q1050 +East Timor,http://www.wikidata.org/entity/Q574 +Chad,http://www.wikidata.org/entity/Q657 +New Zealand,http://www.wikidata.org/entity/Q664 +Kingdom of the Netherlands,http://www.wikidata.org/entity/Q29999 +India,http://www.wikidata.org/entity/Q668 +Tuvalu,http://www.wikidata.org/entity/Q672 +Samoa,http://www.wikidata.org/entity/Q683 +Solomon Islands,http://www.wikidata.org/entity/Q685 +Vanuatu,http://www.wikidata.org/entity/Q686 +Papua New Guinea,http://www.wikidata.org/entity/Q691 +Palau,http://www.wikidata.org/entity/Q695 +Nauru,http://www.wikidata.org/entity/Q697 +Federated States of Micronesia,http://www.wikidata.org/entity/Q702 +Marshall Islands,http://www.wikidata.org/entity/Q709 +Kiribati,http://www.wikidata.org/entity/Q710 +Mongolia,http://www.wikidata.org/entity/Q711 +Fiji,http://www.wikidata.org/entity/Q712 +Venezuela,http://www.wikidata.org/entity/Q717 +Paraguay,http://www.wikidata.org/entity/Q733 +Guyana,http://www.wikidata.org/entity/Q734 +Ecuador,http://www.wikidata.org/entity/Q736 +Colombia,http://www.wikidata.org/entity/Q739 +Bolivia,http://www.wikidata.org/entity/Q750 +Trinidad and Tobago,http://www.wikidata.org/entity/Q754 +Saint Vincent and the Grenadines,http://www.wikidata.org/entity/Q757 +Saint Lucia,http://www.wikidata.org/entity/Q760 +Saint Kitts and Nevis,http://www.wikidata.org/entity/Q763 
+Jamaica,http://www.wikidata.org/entity/Q766 +Grenada,http://www.wikidata.org/entity/Q769 +Guatemala,http://www.wikidata.org/entity/Q774 +The Bahamas,http://www.wikidata.org/entity/Q778 +Antigua and Barbuda,http://www.wikidata.org/entity/Q781 +Honduras,http://www.wikidata.org/entity/Q783 +Dominica,http://www.wikidata.org/entity/Q784 +Dominican Republic,http://www.wikidata.org/entity/Q786 +Haiti,http://www.wikidata.org/entity/Q790 +El Salvador,http://www.wikidata.org/entity/Q792 +Iran,http://www.wikidata.org/entity/Q794 +Iraq,http://www.wikidata.org/entity/Q796 +Costa Rica,http://www.wikidata.org/entity/Q800 +Israel,http://www.wikidata.org/entity/Q801 +Yemen,http://www.wikidata.org/entity/Q805 +Jordan,http://www.wikidata.org/entity/Q810 +Nicaragua,http://www.wikidata.org/entity/Q811 +Kyrgyzstan,http://www.wikidata.org/entity/Q813 +Laos,http://www.wikidata.org/entity/Q819 +Lebanon,http://www.wikidata.org/entity/Q822 +Maldives,http://www.wikidata.org/entity/Q826 +Malaysia,http://www.wikidata.org/entity/Q833 +Myanmar,http://www.wikidata.org/entity/Q836 +Nepal,http://www.wikidata.org/entity/Q837 +Oman,http://www.wikidata.org/entity/Q842 +Pakistan,http://www.wikidata.org/entity/Q843 +Qatar,http://www.wikidata.org/entity/Q846 +Saudi Arabia,http://www.wikidata.org/entity/Q851
\ No newline at end of file diff --git a/scripts/dict_ontology_standardization/ncbi_sequencing_technology.csv b/scripts/dict_ontology_standardization/ncbi_sequencing_technology.csv new file mode 100644 index 0000000..3ec7e09 --- /dev/null +++ b/scripts/dict_ontology_standardization/ncbi_sequencing_technology.csv @@ -0,0 +1,15 @@ +Illumian NextSeq 500,http://www.ebi.ac.uk/efo/EFO_0009173 +Illumina NextSeq 500,http://www.ebi.ac.uk/efo/EFO_0009173 +Nanopore MinION,http://www.ebi.ac.uk/efo/EFO_0008632 +Oxford Nanopore MinION,http://www.ebi.ac.uk/efo/EFO_0008632 +ONT (Oxford Nanopore Technologies),http://www.ebi.ac.uk/efo/EFO_0008632 +Oxford Nanopore technologies MinION,http://www.ebi.ac.uk/efo/EFO_0008632 +MinION Oxford Nanopore,http://www.ebi.ac.uk/efo/EFO_0008632 +Illumina MiSeq,http://www.ebi.ac.uk/efo/EFO_0004205 +Illumina,http://purl.obolibrary.org/obo/OBI_0000759 +Oxford Nanopore technology,http://purl.obolibrary.org/obo/NCIT_C146818 +Oxford Nanopore Technologies,http://purl.obolibrary.org/obo/NCIT_C146818 +Oxford Nanopore,http://purl.obolibrary.org/obo/NCIT_C146818 +IonTorrent,http://purl.obolibrary.org/obo/NCIT_C125894 +Ion Torrent X5Plus,http://purl.obolibrary.org/obo/NCIT_C125894 +Sanger dideoxy sequencing,http://purl.obolibrary.org/obo/NCIT_C19641 diff --git a/scripts/dict_ontology_standardization/ncbi_speciesman_source.csv b/scripts/dict_ontology_standardization/ncbi_speciesman_source.csv new file mode 100644 index 0000000..fcd6c94 --- /dev/null +++ b/scripts/dict_ontology_standardization/ncbi_speciesman_source.csv @@ -0,0 +1,22 @@ +nasopharyngeal swab, http://purl.obolibrary.org/obo/NCIT_C155831 +nasopharyngeal exudate,http://purl.obolibrary.org/obo/NCIT_C155831 +respiratory swab,http://purl.obolibrary.org/obo/NCIT_C155831 +naso-pharyngeal exudate,http://purl.obolibrary.org/obo/NCIT_C155831 +nasopharyngeal aspirate,http://purl.obolibrary.org/obo/NCIT_C155831 +nasal swab specimen,http://purl.obolibrary.org/obo/NCIT_C155831 +pharyngeal 
swab,http://purl.obolibrary.org/obo/NCIT_C155831 +respiratory secretion,http://purl.obolibrary.org/obo/NCIT_C155831 +mid-nasal swab,http://purl.obolibrary.org/obo/NCIT_C155831 +nasopharyngeal (throat) washings,http://purl.obolibrary.org/obo/NCIT_C155831 +oropharyngeal swab,http://purl.obolibrary.org/obo/NCIT_C155835 +throat swab,http://purl.obolibrary.org/obo/NCIT_C155835 +oro-pharyngeal,http://purl.obolibrary.org/obo/NCIT_C155835 +buccal swab,http://purl.obolibrary.org/obo/NCIT_C155835 +throat washing,http://purl.obolibrary.org/obo/NCIT_C155835 +Throat Swab,http://purl.obolibrary.org/obo/NCIT_C155835 +throat (oropharyngeal) swab,http://purl.obolibrary.org/obo/NCIT_C155835 +bronchoalveolar lavage fluid,http://purl.obolibrary.org/obo/NCIT_C13195 +swab,http://purl.obolibrary.org/obo/NCIT_C13195 +oral swab,http://purl.obolibrary.org/obo/NCIT_C13195 +bronchoalveolar lavage,http://purl.obolibrary.org/obo/NCIT_C13195 +sputum,http://purl.obolibrary.org/obo/NCIT_C13278 diff --git a/scripts/from_genbank_to_fasta_and_yaml.py b/scripts/from_genbank_to_fasta_and_yaml.py index 0cc1a57..a7c9dc2 100644 --- a/scripts/from_genbank_to_fasta_and_yaml.py +++ b/scripts/from_genbank_to_fasta_and_yaml.py @@ -1,15 +1,19 @@ from Bio import Entrez -Entrez.email = 'your_email_to_be_polite' +Entrez.email = 'andresguarahino@gmail.com' import xml.etree.ElementTree as ET import yaml import os -path_ncbi_virus_accession = 'sequences.acc' +from datetime import date +today = date.today().strftime("%Y%m%d") + +dir_metadata_today = 'metadata_from_nuccore_{}'.format(today) +dir_fasta_and_yaml_today = 'fasta_and_yaml_{}'.format(today) -date = '20200414' -path_seq_fasta = 'seq_from_nuccore.{}.fasta'.format(date) -path_metadata_xml = 'metadata_from_nuccore.{}.xml'.format(date) +dir_dict_ontology_standardization = 'dict_ontology_standardization/' + +path_ncbi_virus_accession = 'sequences.acc' # Take all the ids id_set = set() @@ -19,9 +23,15 @@ for term in term_list: tmp_list = Entrez.read( 
Entrez.esearch(db='nuccore', term=term, idtype='acc', retmax='10000') )['IdList'] - print(term, len(tmp_list)) - + + # Remove mRNAs, ncRNAs, Proteins, and predicted models (more information here: https://en.wikipedia.org/wiki/RefSeq) + tmp_list = [x for x in tmp_list if x[:2] not in ['NM', 'NR', 'NP', 'XM', 'XR', 'XP', 'WP']] + # Remove the version in the id + tmp_list = [x.split('.')[0] for x in tmp_list] + + print(term, len(tmp_list)) + id_set.update([x.split('.')[0] for x in tmp_list]) print(term_list, len(id_set)) @@ -34,108 +44,168 @@ id_set.update(tmp_list) print(term_list + ['NCBI Virus'], len(id_set)) -if not os.path.exists(path_metadata_xml): - # TO_DO: to check if I already have the records? - - with open(path_metadata_xml, 'w') as fw: - fw.write( - Entrez.efetch(db='nuccore', id=list(id_set), retmode='xml').read() - ) - +def chunks(lst, n): + for i in range(0, len(lst), n): + yield lst[i:i + n] -tree = ET.parse(path_metadata_xml) -GBSet = tree.getroot() +num_ids_for_request = 100 +if not os.path.exists(dir_metadata_today): + os.makedirs(dir_metadata_today) + + for i, id_x_list in enumerate(chunks(list(id_set), num_ids_for_request)): + path_metadata_xxx_xml = os.path.join(dir_metadata_today, 'metadata_{}.xml'.format(i)) + print('Requesting {} ids --> {}'.format(len(id_x_list), path_metadata_xxx_xml)) + + with open(path_metadata_xxx_xml, 'w') as fw: + fw.write( + Entrez.efetch(db='nuccore', id=id_x_list, retmode='xml').read() + ) + + +term_to_uri_dict = {} + +for path_dict_xxx_csv in [os.path.join(dir_dict_ontology_standardization, name_xxx_csv) for name_xxx_csv in os.listdir(dir_dict_ontology_standardization) if name_xxx_csv.endswith('.csv')]: + print('Read {}'.format(path_dict_xxx_csv)) + + with open(path_dict_xxx_csv) as f: + for line in f: + term, uri = line.strip('\n').split(',') + + term_to_uri_dict[term] = uri species_to_taxid_dict = { 'Homo sapiens': 9606 } -for GBSeq in GBSet: - accession_version = GBSeq.find('GBSeq_accession-version').text - 
GBSeq_sequence = GBSeq.find('GBSeq_sequence') - if GBSeq_sequence is None: - print(accession_version, ' - sequence not found') - continue +if not os.path.exists(dir_fasta_and_yaml_today): + os.makedirs(dir_fasta_and_yaml_today) - # A general default-empty yaml could be read from the definitive one - info_for_yaml_dict = { - 'id': 'placeholder', - 'host': {}, - 'sample': {}, - 'virus': {}, - 'technology': {}, - 'submitter': {} - } - + for path_metadata_xxx_xml in [os.path.join(dir_metadata_today, name_metadata_xxx_xml) for name_metadata_xxx_xml in os.listdir(dir_metadata_today) if name_metadata_xxx_xml.endswith('.xml')]: + tree = ET.parse(path_metadata_xxx_xml) + GBSet = tree.getroot() - info_for_yaml_dict['sample']['sample_id'] = accession_version - info_for_yaml_dict['submitter']['authors'] = ';'.join([x.text for x in GBSeq.iter('GBAuthor')]) + for GBSeq in GBSet: + accession_version = GBSeq.find('GBSeq_accession-version').text - - GBSeq_comment = GBSeq.find('GBSeq_comment') - if GBSeq_comment is not None and 'Assembly-Data' in GBSeq_comment.text: - GBSeq_comment_text = GBSeq_comment.text.split('##Assembly-Data-START## ; ')[1].split(' ; ##Assembly-Data-END##')[0] - - for info_to_check, field_in_yaml in zip( - ['Assembly Method', 'Coverage', 'Sequencing Technology'], - ['sequence_assembly_method', 'sequencing_coverage', 'sample_sequencing_technology'] - ): - if info_to_check in GBSeq_comment_text: - info_for_yaml_dict['technology'][field_in_yaml] = GBSeq_comment_text.split('{} :: '.format(info_to_check))[1].split(' ;')[0] - - - for GBFeature in GBSeq.iter('GBFeature'): - if GBFeature.find('GBFeature_key').text != 'source': - continue - - for GBQualifier in GBFeature.iter('GBQualifier'): - GBQualifier_value = GBQualifier.find('GBQualifier_value') - if GBQualifier_value is None: + GBSeq_sequence = GBSeq.find('GBSeq_sequence') + if GBSeq_sequence is None: + print(accession_version, ' - sequence not found') continue - GBQualifier_value_text = GBQualifier_value.text - - 
GBQualifier_name_text = GBQualifier.find('GBQualifier_name').text - - if GBQualifier_name_text == 'host': - GBQualifier_value_text_list = GBQualifier_value_text.split('; ') - - info_for_yaml_dict['host']['host_common_name'] = GBQualifier_value_text_list[0] - - if GBQualifier_value_text_list[0] in species_to_taxid_dict: - info_for_yaml_dict['host']['host_species'] = species_to_taxid_dict[GBQualifier_value_text_list[0]] - - if len(GBQualifier_value_text_list) > 1: - if GBQualifier_value_text_list[1] in ['male', 'female']: - info_for_yaml_dict['host']['host_sex'] = GBQualifier_value_text_list[1] - else: - info_for_yaml_dict['host']['host_health_status'] = GBQualifier_value_text_list[1] - - if 'age' in GBQualifier_value_text: - info_for_yaml_dict['host']['host_age'] = int(GBQualifier_value_text_list[2].split('age ')[1]) - info_for_yaml_dict['host']['host_age_unit'] = 'year' - elif GBQualifier_name_text == 'collected_by': - if any([x in GBQualifier_value_text.lower() for x in ['institute', 'hospital', 'city', 'center']]): - info_for_yaml_dict['sample']['collecting_institution'] = GBQualifier_value_text - else: - info_for_yaml_dict['sample']['collector_name'] = GBQualifier_value_text - elif GBQualifier_name_text == 'isolation_source': - info_for_yaml_dict['sample']['specimen_source'] = GBQualifier_value_text - elif GBQualifier_name_text == 'collection_date': - # TO_DO: which format we will use? 
- info_for_yaml_dict['sample']['collection_date'] = GBQualifier_value_text - elif GBQualifier_name_text in ['lat_lon', 'country']: - info_for_yaml_dict['sample']['collection_location'] = GBQualifier_value_text - elif GBQualifier_name_text == 'note': - info_for_yaml_dict['sample']['additional_collection_information'] = GBQualifier_value_text - elif GBQualifier_name_text == 'isolate': - info_for_yaml_dict['virus']['virus_strain'] = GBQualifier_value_text - elif GBQualifier_name_text == 'db_xref': - info_for_yaml_dict['virus']['virus_species'] = int(GBQualifier_value_text.split('taxon:')[1]) - - with open('{}.fasta'.format(accession_version), 'w') as fw: - fw.write('>{}\n{}'.format(accession_version, GBSeq_sequence.text.upper())) - with open('{}.yaml'.format(accession_version), 'w') as fw: - yaml.dump(info_for_yaml_dict, fw, default_flow_style=False) + + # A general default-empty yaml could be read from the definitive one + info_for_yaml_dict = { + 'id': 'placeholder', + 'host': {}, + 'sample': {}, + 'virus': {}, + 'technology': {}, + 'submitter': {} + } + + + info_for_yaml_dict['sample']['sample_id'] = accession_version + info_for_yaml_dict['submitter']['authors'] = ';'.join([x.text for x in GBSeq.iter('GBAuthor')]) + + + GBSeq_comment = GBSeq.find('GBSeq_comment') + if GBSeq_comment is not None and 'Assembly-Data' in GBSeq_comment.text: + GBSeq_comment_text = GBSeq_comment.text.split('##Assembly-Data-START## ; ')[1].split(' ; ##Assembly-Data-END##')[0] + + for info_to_check, field_in_yaml in zip( + ['Assembly Method', 'Coverage', 'Sequencing Technology'], + ['sequence_assembly_method', 'sequencing_coverage', 'sample_sequencing_technology'] + ): + if info_to_check in GBSeq_comment_text: + tech_info_to_parse = GBSeq_comment_text.split('{} :: '.format(info_to_check))[1].split(' ;')[0] + + if field_in_yaml == 'sequencing_coverage': + # A regular expression would be better! 
+ info_for_yaml_dict['technology'][field_in_yaml] = ';'.join( + [x.strip('(average)').strip("reads/nt").replace(',', '.').strip(' xX>') for x in tech_info_to_parse.split(';')] + ) + elif field_in_yaml == 'sample_sequencing_technology': + new_seq_tec_list = [] + for seq_tec in tech_info_to_parse.split(';'): + seq_tec = seq_tec.strip() + if seq_tec in term_to_uri_dict: + seq_tec = term_to_uri_dict[seq_tec] + else: + print(accession_version, 'missing technologies:', seq_tec) + + new_seq_tec_list.append(seq_tec) + + for n, seq_tec in enumerate(new_seq_tec_list): + info_for_yaml_dict['technology'][field_in_yaml + ('' if n == 0 else str(n + 1))] = seq_tec + else: + info_for_yaml_dict['technology'][field_in_yaml] = tech_info_to_parse + + + #term_to_uri_dict + + for GBFeature in GBSeq.iter('GBFeature'): + if GBFeature.find('GBFeature_key').text != 'source': + continue + + for GBQualifier in GBFeature.iter('GBQualifier'): + GBQualifier_value = GBQualifier.find('GBQualifier_value') + if GBQualifier_value is None: + continue + GBQualifier_value_text = GBQualifier_value.text + + GBQualifier_name_text = GBQualifier.find('GBQualifier_name').text + + if GBQualifier_name_text == 'host': + GBQualifier_value_text_list = GBQualifier_value_text.split('; ') + + info_for_yaml_dict['host']['host_common_name'] = GBQualifier_value_text_list[0] + + if GBQualifier_value_text_list[0] in species_to_taxid_dict: + info_for_yaml_dict['host']['host_species'] = species_to_taxid_dict[GBQualifier_value_text_list[0]] + + if len(GBQualifier_value_text_list) > 1: + if GBQualifier_value_text_list[1] in ['male', 'female']: + info_for_yaml_dict['host']['host_sex'] = GBQualifier_value_text_list[1] + else: + info_for_yaml_dict['host']['host_health_status'] = GBQualifier_value_text_list[1] + + if 'age' in GBQualifier_value_text: + info_for_yaml_dict['host']['host_age'] = int(GBQualifier_value_text_list[2].split('age ')[1]) + info_for_yaml_dict['host']['host_age_unit'] = 'year' + elif GBQualifier_name_text == 
'collected_by': + if any([x in GBQualifier_value_text.lower() for x in ['institute', 'hospital', 'city', 'center']]): + info_for_yaml_dict['sample']['collecting_institution'] = GBQualifier_value_text + else: + info_for_yaml_dict['sample']['collector_name'] = GBQualifier_value_text + elif GBQualifier_name_text == 'isolation_source': + if GBQualifier_value_text in term_to_uri_dict: + info_for_yaml_dict['sample']['specimen_source'] = term_to_uri_dict[GBQualifier_value_text] + else: + if GBQualifier_value_text in ['NP/OP swab', 'nasopharyngeal and oropharyngeal swab', 'nasopharyngeal/oropharyngeal swab', 'np/np swab']: + info_for_yaml_dict['sample']['specimen_source'] = term_to_uri_dict['nasopharyngeal swab'] + info_for_yaml_dict['sample']['specimen_source2'] = term_to_uri_dict['oropharyngeal swab'] + else: + print(accession_version, 'missing specimen_source:', GBQualifier_value_text) + elif GBQualifier_name_text == 'collection_date': + # TO_DO: which format we will use? + info_for_yaml_dict['sample']['collection_date'] = GBQualifier_value_text + elif GBQualifier_name_text in ['lat_lon', 'country']: + if GBQualifier_value_text in term_to_uri_dict: + GBQualifier_value_text = term_to_uri_dict[GBQualifier_value_text] + else: + print(accession_version, 'missing {}:'.format(GBQualifier_name_text), GBQualifier_value_text) + + info_for_yaml_dict['sample']['collection_location'] = GBQualifier_value_text + elif GBQualifier_name_text == 'note': + info_for_yaml_dict['sample']['additional_collection_information'] = GBQualifier_value_text + elif GBQualifier_name_text == 'isolate': + info_for_yaml_dict['virus']['virus_strain'] = GBQualifier_value_text + elif GBQualifier_name_text == 'db_xref': + info_for_yaml_dict['virus']['virus_species'] = int(GBQualifier_value_text.split('taxon:')[1]) + + with open(os.path.join(dir_fasta_and_yaml_today, '{}.fasta'.format(accession_version)), 'w') as fw: + fw.write('>{}\n{}'.format(accession_version, GBSeq_sequence.text.upper())) + + with 
open(os.path.join(dir_fasta_and_yaml_today, '{}.yaml'.format(accession_version)), 'w') as fw: + yaml.dump(info_for_yaml_dict, fw, default_flow_style=False) @@ -31,7 +31,7 @@ setup( author_email="peter.amstutz@curii.com", license="Apache 2.0", packages=["bh20sequploader", "bh20seqanalyzer", "bh20simplewebuploader"], - package_data={"bh20sequploader": ["bh20seq-schema.yml", "validation/formats"], + package_data={"bh20sequploader": ["bh20seq-schema.yml", "bh20seq-options.yml", "validation/formats"], }, install_requires=install_requires, extras_require={ |