From 5de5fb93e50fe9006221b30ac4a66f1be0db82e7 Mon Sep 17 00:00:00 2001 From: Terry Truong Date: Sun, 11 Sep 2022 14:55:42 +1000 Subject: Add backend unit tests - Add unit testing code in backend/tests/ - Change to snake-case for script/file/directory names - Use os.path.join() instead of '/' - Refactor script code into function defs and a main-guard - Make global vars all-caps Some fixes: - For getting descriptions, some wiki redirects weren't properly resolved - Linked images were sub-optimally propagated - Generation of reduced trees assumed a wiki-id association implied a description - Tilo.py had potential null dereferences by not always using a reduced node set - EOL image downloading didn't properly wait for all threads to end when finishing --- backend/README.md | 12 +- backend/server.py | 2 +- backend/tests/__init__.py | 0 backend/tests/common.py | 49 +++ backend/tests/dbpedia/__init__.py | 0 backend/tests/dbpedia/test_gen_desc_data.py | 107 +++++++ backend/tests/enwiki/__init__.py | 0 .../enwiki/sample_enwiki_pages_articles.xml.bz2 | Bin 0 -> 41998 bytes .../tests/enwiki/test_download_img_license_info.py | 185 +++++++++++ backend/tests/enwiki/test_download_imgs.py | 54 ++++ backend/tests/enwiki/test_gen_desc_data.py | 37 +++ backend/tests/enwiki/test_gen_dump_index_db.py | 39 +++ backend/tests/enwiki/test_gen_img_data.py | 64 ++++ backend/tests/enwiki/test_gen_pageview_data.py | 44 +++ backend/tests/eol/__init__.py | 0 backend/tests/eol/test_download_imgs.py | 74 +++++ backend/tests/eol/test_gen_images_list_db.py | 32 ++ backend/tests/eol/test_review_imgs.py | 46 +++ backend/tests/green.png | Bin 0 -> 5067 bytes backend/tests/red.png | Bin 0 -> 5067 bytes backend/tests/test_gen_desc_data.py | 101 ++++++ backend/tests/test_gen_imgs.py | 125 ++++++++ backend/tests/test_gen_linked_imgs.py | 84 +++++ backend/tests/test_gen_mapping_data.py | 302 ++++++++++++++++++ backend/tests/test_gen_name_data.py | 93 ++++++ backend/tests/test_gen_otol_data.py | 118 ++++++++ 
backend/tests/test_gen_pop_data.py | 42 +++ backend/tests/test_gen_reduced_trees.py | 166 ++++++++++ backend/tests/test_review_imgs_to_gen.py | 84 +++++ backend/tests/test_tilo.py | 160 ++++++++++ backend/tests/wikidata/__init__.py | 0 backend/tests/wikidata/test_gen_taxon_src_data.py | 109 +++++++ backend/tilo.py | 86 ++++-- backend/tolData/README.md | 149 --------- backend/tolData/dbpedia/README.md | 29 -- backend/tolData/dbpedia/genDescData.py | 128 -------- backend/tolData/enwiki/README.md | 63 ---- backend/tolData/enwiki/downloadImgLicenseInfo.py | 147 --------- backend/tolData/enwiki/downloadImgs.py | 88 ------ backend/tolData/enwiki/genDescData.py | 124 -------- backend/tolData/enwiki/genDumpIndexDb.py | 56 ---- backend/tolData/enwiki/genImgData.py | 186 ------------ backend/tolData/enwiki/genPageviewData.py | 62 ---- backend/tolData/enwiki/lookupPage.py | 66 ---- backend/tolData/eol/README.md | 31 -- backend/tolData/eol/downloadImgs.py | 142 --------- backend/tolData/eol/genImagesListDb.py | 34 --- backend/tolData/eol/reviewImgs.py | 202 ------------ backend/tolData/genDescData.py | 90 ------ backend/tolData/genImgs.py | 196 ------------ backend/tolData/genLinkedImgs.py | 124 -------- backend/tolData/genMappingData.py | 229 -------------- backend/tolData/genNameData.py | 113 ------- backend/tolData/genOtolData.py | 246 --------------- backend/tolData/genPopData.py | 39 --- backend/tolData/genReducedTrees.py | 334 -------------------- backend/tolData/otol/README.md | 19 -- backend/tolData/pickedImgs/README.md | 10 - backend/tolData/reviewImgsToGen.py | 223 -------------- backend/tolData/wikidata/README.md | 18 -- backend/tolData/wikidata/genTaxonSrcData.py | 240 --------------- backend/tol_data/README.md | 155 ++++++++++ backend/tol_data/__init__.py | 0 backend/tol_data/dbpedia/README.md | 29 ++ backend/tol_data/dbpedia/__init__.py | 0 backend/tol_data/dbpedia/gen_desc_data.py | 120 ++++++++ backend/tol_data/enwiki/README.md | 63 ++++ 
backend/tol_data/enwiki/__init__.py | 0 .../tol_data/enwiki/download_img_license_info.py | 154 ++++++++++ backend/tol_data/enwiki/download_imgs.py | 99 ++++++ backend/tol_data/enwiki/gen_desc_data.py | 126 ++++++++ backend/tol_data/enwiki/gen_dump_index_db.py | 60 ++++ backend/tol_data/enwiki/gen_img_data.py | 193 ++++++++++++ backend/tol_data/enwiki/gen_pageview_data.py | 68 +++++ backend/tol_data/enwiki/lookup_page.py | 71 +++++ backend/tol_data/eol/README.md | 31 ++ backend/tol_data/eol/__init__.py | 0 backend/tol_data/eol/download_imgs.py | 152 ++++++++++ backend/tol_data/eol/gen_images_list_db.py | 39 +++ backend/tol_data/eol/review_imgs.py | 213 +++++++++++++ backend/tol_data/gen_desc_data.py | 92 ++++++ backend/tol_data/gen_imgs.py | 214 +++++++++++++ backend/tol_data/gen_linked_imgs.py | 117 +++++++ backend/tol_data/gen_mapping_data.py | 271 +++++++++++++++++ backend/tol_data/gen_name_data.py | 128 ++++++++ backend/tol_data/gen_otol_data.py | 267 ++++++++++++++++ backend/tol_data/gen_pop_data.py | 45 +++ backend/tol_data/gen_reduced_trees.py | 337 +++++++++++++++++++++ backend/tol_data/otol/README.md | 19 ++ backend/tol_data/picked_imgs/README.md | 10 + backend/tol_data/review_imgs_to_gen.py | 241 +++++++++++++++ backend/tol_data/wikidata/README.md | 18 ++ backend/tol_data/wikidata/__init__.py | 0 backend/tol_data/wikidata/gen_taxon_src_data.py | 239 +++++++++++++++ 94 files changed, 5764 insertions(+), 3410 deletions(-) create mode 100644 backend/tests/__init__.py create mode 100644 backend/tests/common.py create mode 100644 backend/tests/dbpedia/__init__.py create mode 100644 backend/tests/dbpedia/test_gen_desc_data.py create mode 100644 backend/tests/enwiki/__init__.py create mode 100644 backend/tests/enwiki/sample_enwiki_pages_articles.xml.bz2 create mode 100644 backend/tests/enwiki/test_download_img_license_info.py create mode 100644 backend/tests/enwiki/test_download_imgs.py create mode 100644 backend/tests/enwiki/test_gen_desc_data.py create mode 
100644 backend/tests/enwiki/test_gen_dump_index_db.py create mode 100644 backend/tests/enwiki/test_gen_img_data.py create mode 100644 backend/tests/enwiki/test_gen_pageview_data.py create mode 100644 backend/tests/eol/__init__.py create mode 100644 backend/tests/eol/test_download_imgs.py create mode 100644 backend/tests/eol/test_gen_images_list_db.py create mode 100644 backend/tests/eol/test_review_imgs.py create mode 100644 backend/tests/green.png create mode 100644 backend/tests/red.png create mode 100644 backend/tests/test_gen_desc_data.py create mode 100644 backend/tests/test_gen_imgs.py create mode 100644 backend/tests/test_gen_linked_imgs.py create mode 100644 backend/tests/test_gen_mapping_data.py create mode 100644 backend/tests/test_gen_name_data.py create mode 100644 backend/tests/test_gen_otol_data.py create mode 100644 backend/tests/test_gen_pop_data.py create mode 100644 backend/tests/test_gen_reduced_trees.py create mode 100644 backend/tests/test_review_imgs_to_gen.py create mode 100644 backend/tests/test_tilo.py create mode 100644 backend/tests/wikidata/__init__.py create mode 100644 backend/tests/wikidata/test_gen_taxon_src_data.py delete mode 100644 backend/tolData/README.md delete mode 100644 backend/tolData/dbpedia/README.md delete mode 100755 backend/tolData/dbpedia/genDescData.py delete mode 100644 backend/tolData/enwiki/README.md delete mode 100755 backend/tolData/enwiki/downloadImgLicenseInfo.py delete mode 100755 backend/tolData/enwiki/downloadImgs.py delete mode 100755 backend/tolData/enwiki/genDescData.py delete mode 100755 backend/tolData/enwiki/genDumpIndexDb.py delete mode 100755 backend/tolData/enwiki/genImgData.py delete mode 100755 backend/tolData/enwiki/genPageviewData.py delete mode 100755 backend/tolData/enwiki/lookupPage.py delete mode 100644 backend/tolData/eol/README.md delete mode 100755 backend/tolData/eol/downloadImgs.py delete mode 100755 backend/tolData/eol/genImagesListDb.py delete mode 100755 
backend/tolData/eol/reviewImgs.py delete mode 100755 backend/tolData/genDescData.py delete mode 100755 backend/tolData/genImgs.py delete mode 100755 backend/tolData/genLinkedImgs.py delete mode 100755 backend/tolData/genMappingData.py delete mode 100755 backend/tolData/genNameData.py delete mode 100755 backend/tolData/genOtolData.py delete mode 100755 backend/tolData/genPopData.py delete mode 100755 backend/tolData/genReducedTrees.py delete mode 100644 backend/tolData/otol/README.md delete mode 100644 backend/tolData/pickedImgs/README.md delete mode 100755 backend/tolData/reviewImgsToGen.py delete mode 100644 backend/tolData/wikidata/README.md delete mode 100755 backend/tolData/wikidata/genTaxonSrcData.py create mode 100644 backend/tol_data/README.md create mode 100644 backend/tol_data/__init__.py create mode 100644 backend/tol_data/dbpedia/README.md create mode 100644 backend/tol_data/dbpedia/__init__.py create mode 100755 backend/tol_data/dbpedia/gen_desc_data.py create mode 100644 backend/tol_data/enwiki/README.md create mode 100644 backend/tol_data/enwiki/__init__.py create mode 100755 backend/tol_data/enwiki/download_img_license_info.py create mode 100755 backend/tol_data/enwiki/download_imgs.py create mode 100755 backend/tol_data/enwiki/gen_desc_data.py create mode 100755 backend/tol_data/enwiki/gen_dump_index_db.py create mode 100755 backend/tol_data/enwiki/gen_img_data.py create mode 100755 backend/tol_data/enwiki/gen_pageview_data.py create mode 100755 backend/tol_data/enwiki/lookup_page.py create mode 100644 backend/tol_data/eol/README.md create mode 100644 backend/tol_data/eol/__init__.py create mode 100755 backend/tol_data/eol/download_imgs.py create mode 100755 backend/tol_data/eol/gen_images_list_db.py create mode 100755 backend/tol_data/eol/review_imgs.py create mode 100755 backend/tol_data/gen_desc_data.py create mode 100755 backend/tol_data/gen_imgs.py create mode 100755 backend/tol_data/gen_linked_imgs.py create mode 100755 
backend/tol_data/gen_mapping_data.py create mode 100755 backend/tol_data/gen_name_data.py create mode 100755 backend/tol_data/gen_otol_data.py create mode 100755 backend/tol_data/gen_pop_data.py create mode 100755 backend/tol_data/gen_reduced_trees.py create mode 100644 backend/tol_data/otol/README.md create mode 100644 backend/tol_data/picked_imgs/README.md create mode 100755 backend/tol_data/review_imgs_to_gen.py create mode 100644 backend/tol_data/wikidata/README.md create mode 100644 backend/tol_data/wikidata/__init__.py create mode 100755 backend/tol_data/wikidata/gen_taxon_src_data.py (limited to 'backend') diff --git a/backend/README.md b/backend/README.md index fc68183..0f3f332 100644 --- a/backend/README.md +++ b/backend/README.md @@ -1,5 +1,11 @@ # Files -- **tolData**: Holds scripts for generating the tree-of-life database -- **tilo.py**: WSGI script that serves data from the tree-of-life database.
- Note: Using WSGI instead of CGI to avoid starting a new process for each request. +- **tol_data/**: Holds scripts for generating the tree-of-life database +- **tilo.py**: WSGI script that serves data from the tree-of-life database.
+ Note: WSGI is used instead of CGI to avoid starting a new process for each request. - **server.py**: Basic dev server that serves the WSGI script and image files +- **tests/**: Holds unit testing scripts.
+ Running all tests: `python -m unittest discover -s tests`
+ Running a particular test: `python -m unittest tests/test_script1.py`
+ Getting code coverage info (uses python package 'coverage'):
+ 1. `coverage run -m unittest discover -s tests` + 2. `coverage report -m > report.txt` diff --git a/backend/server.py b/backend/server.py index 5b0d26b..5b4d050 100755 --- a/backend/server.py +++ b/backend/server.py @@ -18,7 +18,7 @@ def wrappingApp(environ: dict[str, str], start_response) -> Iterable[bytes]: if urlPath.startswith('/data/'): # Run WSGI script return application(environ, start_response) - elif urlPath.startswith('/tolData/img/'): + elif urlPath.startswith('/tol_data/img/'): # Serve image file imgPath = os.path.join(os.getcwd(), urlPath[1:]) if os.path.exists(imgPath): diff --git a/backend/tests/__init__.py b/backend/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/backend/tests/common.py b/backend/tests/common.py new file mode 100644 index 0000000..cb455e4 --- /dev/null +++ b/backend/tests/common.py @@ -0,0 +1,49 @@ +""" +Utilities for testing +""" + +from typing import Any +import bz2, gzip, sqlite3 + +def createTestFile(filename: str, content: str) -> None: + """ Creates a file with the given name and contents """ + with open(filename, 'w') as file: + file.write(content) + +def readTestFile(filename: str) -> str: + """ Returns the contents of a file with the given name """ + with open(filename) as file: + return file.read() + +def createTestBz2(filename: str, content: str) -> None: + """ Creates a bzip2 file with the given name and contents """ + with bz2.open(filename, mode='wb') as file: + file.write(content.encode()) + +def createTestGzip(filename: str, content: str) -> None: + """ Creates a gzip file with the given name and contents """ + with gzip.open(filename, mode='wt') as file: + file.write(content) + +TableRows = set[tuple[Any, ...]] +def createTestDbTable(filename: str, createCmd: str | None, insertCmd: str, rows: TableRows) -> None: + """ Creates an sqlite db with a table specified by creation+insertion commands and records. 
+ If 'createCmd' is None, just insert into an existing table.""" + dbCon = sqlite3.connect(filename) + dbCur = dbCon.cursor() + if createCmd is not None: + dbCur.execute(createCmd) + for row in rows: + dbCur.execute(insertCmd, row) + dbCon.commit() + dbCon.close() + +def readTestDbTable(filename: str, selectCmd: str) -> TableRows: + """ Returns the records in a sqlite db with the given name, using the given select command """ + rows: set[tuple[Any, ...]] = set() + dbCon = sqlite3.connect(filename) + dbCur = dbCon.cursor() + for row in dbCur.execute(selectCmd): + rows.add(row) + dbCon.close() + return rows diff --git a/backend/tests/dbpedia/__init__.py b/backend/tests/dbpedia/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/backend/tests/dbpedia/test_gen_desc_data.py b/backend/tests/dbpedia/test_gen_desc_data.py new file mode 100644 index 0000000..7d35677 --- /dev/null +++ b/backend/tests/dbpedia/test_gen_desc_data.py @@ -0,0 +1,107 @@ +import unittest +import tempfile, os + +from tests.common import createTestBz2, readTestDbTable +from tol_data.dbpedia.gen_desc_data import genData + +class TestGenData(unittest.TestCase): + def test_gen(self): + with tempfile.TemporaryDirectory() as tempDir: + # Create temp labels file + labelsFile = os.path.join(tempDir, 'labels.ttl.bz2') + createTestBz2(labelsFile, ( + ' "One"@en .\n' + ' "II"@en .\n' + ' "three"@en .\n' + ' "A Hat"@en .\n' + )) + # Create temp ids file + idsFile = f'{tempDir}ids.ttl.bz2' + createTestBz2(idsFile, ( + ' ' + ' "1"^^ .\n' + ' ' + ' "2"^^ .\n' + ' ' + ' "3"^^ .\n' + ' ' + ' "210"^^ .\n' + )) + # Create temp redirects file + redirectsFile = os.path.join(tempDir, 'redirects.ttl.bz2') + createTestBz2(redirectsFile, ( + ' ' + ' .\n' + )) + # Create temp disambig file + disambigFile = os.path.join(tempDir, 'disambig.ttl.bz2') + createTestBz2(disambigFile, ( + ' ' + ' .\n' + ' ' + ' .\n' + )) + # Create temp types file + typesFile = os.path.join(tempDir, 'types.ttl.bz2') + 
createTestBz2(typesFile, ( + ' ' + ' .\n' + ' ' + ' .\n' + )) + # Create temp abstracts file + abstractsFile = os.path.join(tempDir, 'abstracts.ttl.bz2') + createTestBz2(abstractsFile, ( + ' ' + ' "One is a number."@en .\n' + ' ' + ' "Hats are not parrots, nor are they potatoes."@en .\n' + )) + # Run + dbFile = os.path.join(tempDir, 'descData.db') + genData(labelsFile, idsFile, redirectsFile, disambigFile, typesFile, abstractsFile, dbFile) + # Check + self.assertEqual( + readTestDbTable(dbFile, 'SELECT iri, label from labels'), + { + ('http://dbpedia.org/resource/One', 'One'), + ('http://dbpedia.org/resource/Two', 'II'), + ('http://dbpedia.org/resource/Three', 'three'), + ('http://dbpedia.org/resource/A_Hat', 'A Hat'), + } + ) + self.assertEqual( + readTestDbTable(dbFile, 'SELECT iri, id from ids'), + { + ('http://dbpedia.org/resource/One', 1), + ('http://dbpedia.org/resource/Two', 2), + ('http://dbpedia.org/resource/Three', 3), + ('http://dbpedia.org/resource/A_Hat', 210), + } + ) + self.assertEqual( + readTestDbTable(dbFile, 'SELECT iri, target from redirects'), + { + ('http://dbpedia.org/resource/Three', 'http://dbpedia.org/resource/A_Hat'), + } + ) + self.assertEqual( + readTestDbTable(dbFile, 'SELECT iri from disambiguations'), + { + ('http://dbpedia.org/resource/Two',), + } + ) + self.assertEqual( + readTestDbTable(dbFile, 'SELECT iri, type from types'), + { + ('http://dbpedia.org/resource/One', 'http://dbpedia.org/ontology/Thing'), + ('http://dbpedia.org/resource/Three', 'http://dbpedia.org/ontology/Thing'), + } + ) + self.assertEqual( + readTestDbTable(dbFile, 'SELECT iri, abstract from abstracts'), + { + ('http://dbpedia.org/resource/One', 'One is a number.'), + ('http://dbpedia.org/resource/A_Hat', 'Hats are not parrots, nor are they potatoes.'), + } + ) diff --git a/backend/tests/enwiki/__init__.py b/backend/tests/enwiki/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/backend/tests/enwiki/sample_enwiki_pages_articles.xml.bz2 
b/backend/tests/enwiki/sample_enwiki_pages_articles.xml.bz2 new file mode 100644 index 0000000..2abfdaa Binary files /dev/null and b/backend/tests/enwiki/sample_enwiki_pages_articles.xml.bz2 differ diff --git a/backend/tests/enwiki/test_download_img_license_info.py b/backend/tests/enwiki/test_download_img_license_info.py new file mode 100644 index 0000000..ed6e426 --- /dev/null +++ b/backend/tests/enwiki/test_download_img_license_info.py @@ -0,0 +1,185 @@ +import unittest +from unittest.mock import Mock, patch +import tempfile, os + +from tests.common import createTestDbTable, readTestDbTable +from tol_data.enwiki.download_img_license_info import downloadInfo + +TEST_RESPONSE1 = { + 'batchcomplete': '', + 'query': { + 'normalized': [ + { + 'from': 'File:Georgia_Aquarium_-_Giant_Grouper_edit.jpg', + 'to': 'File:Georgia Aquarium - Giant Grouper edit.jpg' + } + ], + 'pages': { + '-1': { + 'ns': 6, + 'title': 'File:Octopus2.jpg', + 'missing': '', + 'known': '', + 'imagerepository': 'shared', + 'imageinfo': [ + { + 'url': 'https://upload.wikimedia.org/wikipedia/commons/5/57/Octopus2.jpg', + 'descriptionurl': 'https://commons.wikimedia.org/wiki/File:Octopus2.jpg', + 'descriptionshorturl': 'https://commons.wikimedia.org/w/index.php?curid=2795257', + 'extmetadata': { + 'Credit': { + 'value': 'Own work', + 'source': 'commons-desc-page', + 'hidden': '' + }, + 'Artist': { + 'value': 'albert kok', + 'source': 'commons-desc-page' + }, + 'LicenseShortName': { + 'value': 'CC BY-SA 3.0', + 'source': 'commons-desc-page', + 'hidden': '' + }, + 'Restrictions': { + 'value': '', + 'source': 'commons-desc-page', + 'hidden': '' + } + } + } + ] + } + } + } +} +TEST_RESPONSE2 = { + 'batchcomplete': '', + 'query': { + 'normalized': [ + { + 'from': 'File:Georgia_Aquarium_-_Giant_Grouper_edit.jpg', + 'to': 'File:Georgia Aquarium - Giant Grouper edit.jpg' + } + ], + 'pages': { + '-1': { + 'ns': 6, + 'title': 'File:Octopus2.jpg', + 'missing': '', + 'known': '', + 'imagerepository': 'shared', + 
'imageinfo': [ + { + 'url': 'https://upload.wikimedia.org/wikipedia/commons/5/57/Octopus2.jpg', + 'descriptionurl': 'https://commons.wikimedia.org/wiki/File:Octopus2.jpg', + 'descriptionshorturl': 'https://commons.wikimedia.org/w/index.php?curid=2795257', + 'extmetadata': { + 'Credit': { + 'value': 'Own work', + 'source': 'commons-desc-page', + 'hidden': '' + }, + 'Artist': { + 'value': 'albert kok', + 'source': 'commons-desc-page' + }, + 'LicenseShortName': { + 'value': 'CC BY-SA 3.0', + 'source': 'commons-desc-page', + 'hidden': '' + }, + 'Restrictions': { + 'value': '', + 'source': 'commons-desc-page', + 'hidden': '' + } + } + } + ] + }, + '-2': { + 'ns': 6, + 'title': 'File:Georgia Aquarium - Giant Grouper edit.jpg', + 'missing': '', + 'known': '', + 'imagerepository': 'shared', + 'imageinfo': [ + { + 'url': 'https://upload.wikimedia.org/wikipedia/commons/2/23/Georgia_Aquarium_-_Giant_Grouper_edit.jpg', + 'descriptionurl': 'https://commons.wikimedia.org/wiki/File:Georgia_Aquarium_-_Giant_Grouper_edit.jpg', + 'descriptionshorturl': 'https://commons.wikimedia.org/w/index.php?curid=823649', + 'extmetadata': { + 'Credit': { + "value": "File:Georgia Aquarium - Giant Grouper.jpg", + 'source': 'commons-desc-page', + 'hidden': '' + }, + 'Artist': { + "value": "Taken by Diliff Edited by Fir0002", + 'source': 'commons-desc-page' + }, + 'LicenseShortName': { + 'value': 'CC BY 2.5', + 'source': 'commons-desc-page', + 'hidden': '' + }, + 'Restrictions': { + 'value': '', + 'source': 'commons-desc-page', + 'hidden': '' + } + } + } + ] + } + } + } +} + +class TestDownloadInfo(unittest.TestCase): + @patch('requests.get', autospec=True) + def test_download(self, requestsGetMock): + requestsGetMock.side_effect = [Mock(json=lambda: TEST_RESPONSE1), Mock(json=lambda: TEST_RESPONSE2)] + with tempfile.TemporaryDirectory() as tempDir: + # Create temp image-data db + imgDb = os.path.join(tempDir, 'img_data.db') + createTestDbTable( + imgDb, + 'CREATE TABLE page_imgs (page_id INT 
PRIMARY KEY, img_name TEXT)', + 'INSERT into page_imgs VALUES (?, ?)', + { + (1, 'Octopus2.jpg'), + } + ) + # Run + downloadInfo(imgDb) + # Check + self.assertEqual( + readTestDbTable(imgDb, 'SELECT name, license, artist, credit, restrictions, url from imgs'), + { + ('Octopus2.jpg', 'CC BY-SA 3.0', 'albert kok', 'Own work', '', + 'https://upload.wikimedia.org/wikipedia/commons/5/57/Octopus2.jpg'), + } + ) + # Run with updated image-data db + createTestDbTable( + imgDb, + None, + 'INSERT into page_imgs VALUES (?, ?)', + { + (2, 'Georgia_Aquarium_-_Giant_Grouper_edit.jpg'), + } + ) + downloadInfo(imgDb) + # Check + self.assertEqual( + readTestDbTable(imgDb, 'SELECT name, license, artist, credit, restrictions, url from imgs'), + { + ('Octopus2.jpg', 'CC BY-SA 3.0', 'albert kok', 'Own work', '', + 'https://upload.wikimedia.org/wikipedia/commons/5/57/Octopus2.jpg'), + ('Georgia_Aquarium_-_Giant_Grouper_edit.jpg', 'CC BY 2.5', 'Taken by Diliff Edited by Fir0002', + 'File:Georgia Aquarium - Giant Grouper.jpg', '', 'https://upload.wikimedia.org/' \ + 'wikipedia/commons/2/23/Georgia_Aquarium_-_Giant_Grouper_edit.jpg'), + } + ) diff --git a/backend/tests/enwiki/test_download_imgs.py b/backend/tests/enwiki/test_download_imgs.py new file mode 100644 index 0000000..2618b8a --- /dev/null +++ b/backend/tests/enwiki/test_download_imgs.py @@ -0,0 +1,54 @@ +import unittest +from unittest.mock import Mock, patch +import tempfile, os + +from tests.common import readTestFile, createTestDbTable +from tol_data.enwiki.download_imgs import downloadImgs + +class TestDownloadInfo(unittest.TestCase): + @patch('requests.get', autospec=True) + def test_download(self, requestsGetMock): + requestsGetMock.side_effect = lambda url, **kwargs: Mock(content=('img:' + url).encode()) + with tempfile.TemporaryDirectory() as tempDir: + # Create temp image-data db + imgDb = os.path.join(tempDir, 'img_data.db') + createTestDbTable( + imgDb, + 'CREATE TABLE page_imgs (page_id INT PRIMARY KEY, img_name 
TEXT)', + 'INSERT into page_imgs VALUES (?, ?)', + { + (1, 'one'), + (2, 'two'), + (3, 'three'), + (4, 'four'), + (5, 'five'), + (6, 'six'), + (7, 'seven'), + } + ) + createTestDbTable( + imgDb, + 'CREATE TABLE imgs' \ + '(name TEXT PRIMARY KEY, license TEXT, artist TEXT, credit TEXT, restrictions TEXT, url TEXT)', + 'INSERT INTO imgs VALUES (?, ?, ?, ?, ?, ?)', + { + ('one','cc-by','alice','anna','','https://upload.wikimedia.org/1.jpg'), + ('two','???','bob','barbara','','https://upload.wikimedia.org/2.png'), + ('three','cc-by-sa','clare','File:?','','https://upload.wikimedia.org/3.gif'), + ('four','cc-by-sa 4.0','dave','dan','all','https://upload.wikimedia.org/4.jpeg'), + ('five','cc0','eve','eric',None,'https://upload.wikimedia.org/5.png'), + ('six','cc-by','','fred','','https://upload.wikimedia.org/6.png'), + } + ) + # Create temp output directory + with tempfile.TemporaryDirectory() as outDir: + # Run + downloadImgs(imgDb, outDir, 0) + # Check + expectedImgs = { + '1.jpg': 'img:https://upload.wikimedia.org/1.jpg', + '5.png': 'img:https://upload.wikimedia.org/5.png', + } + self.assertEqual(set(os.listdir(outDir)), set(expectedImgs.keys())) + for imgName, content in expectedImgs.items(): + self.assertEqual(readTestFile(os.path.join(outDir, imgName)), content) diff --git a/backend/tests/enwiki/test_gen_desc_data.py b/backend/tests/enwiki/test_gen_desc_data.py new file mode 100644 index 0000000..801aa69 --- /dev/null +++ b/backend/tests/enwiki/test_gen_desc_data.py @@ -0,0 +1,37 @@ +import unittest +import os, tempfile + +from tests.common import readTestDbTable +from tol_data.enwiki.gen_desc_data import genData + +TEST_DUMP_FILE = os.path.join(os.path.dirname(__file__), 'sample_enwiki_pages_articles.xml.bz2') + +class TestGenData(unittest.TestCase): + def test_gen(self): + with tempfile.TemporaryDirectory() as tempDir: + # Run + dbFile = os.path.join(tempDir, 'descData.db') + genData(TEST_DUMP_FILE, dbFile) + # Check + self.assertEqual( + readTestDbTable(dbFile, 
'SELECT id, title FROM pages'), + { + (10, 'AccessibleComputing'), + (13, 'AfghanistanHistory'), + (25, 'Autism'), + } + ) + self.assertEqual( + readTestDbTable(dbFile, 'SELECT id, target FROM redirects'), + { + (10, 'Computer accessibility'), + (13, 'History of Afghanistan'), + } + ) + descsRows = readTestDbTable(dbFile, 'SELECT id, desc FROM descs') + expectedDescPrefixes = { + 25: 'Kanner autism, or classic autism, is a neurodevelopmental disorder', + } + self.assertEqual({row[0] for row in descsRows}, set(expectedDescPrefixes.keys())) + for id, desc in descsRows: + self.assertTrue(id in expectedDescPrefixes and desc.startswith(expectedDescPrefixes[id])) diff --git a/backend/tests/enwiki/test_gen_dump_index_db.py b/backend/tests/enwiki/test_gen_dump_index_db.py new file mode 100644 index 0000000..e0715f3 --- /dev/null +++ b/backend/tests/enwiki/test_gen_dump_index_db.py @@ -0,0 +1,39 @@ +import unittest +import tempfile, os + +from tests.common import createTestBz2, readTestDbTable +from tol_data.enwiki.gen_dump_index_db import genData + +def runGenData(indexFileContents: str): + """ Sets up index file to be read by genData(), runs it, reads the output database, and returns offset info. 
""" + with tempfile.TemporaryDirectory() as tempDir: + # Create temp index file + indexFile = os.path.join(tempDir, 'index.txt.bz2') + createTestBz2(indexFile, indexFileContents) + # Run + dbFile = os.path.join(tempDir, 'data.db') + genData(indexFile, dbFile) + # Read db + return readTestDbTable(dbFile, 'SELECT title, id, offset, next_offset FROM offsets') + +class TestGenData(unittest.TestCase): + def setUp(self): + self.maxDiff = None # Remove output-diff size limit + def test_index_file(self): + indexFileContents = ( + '100:10:apple\n' + '100:11:ant\n' + '300:99:banana ice-cream\n' + '1000:2030:Custard!\n' + ) + offsetsMap = runGenData(indexFileContents) + self.assertEqual(offsetsMap, { + ('apple', 10, 100, 300), + ('ant', 11, 100, 300), + ('banana ice-cream', 99, 300, 1000), + ('Custard!', 2030, 1000, -1), + }) + def test_emp_index(self): + offsetsMap = runGenData('') + self.assertEqual(offsetsMap, set()) + pass diff --git a/backend/tests/enwiki/test_gen_img_data.py b/backend/tests/enwiki/test_gen_img_data.py new file mode 100644 index 0000000..1703b78 --- /dev/null +++ b/backend/tests/enwiki/test_gen_img_data.py @@ -0,0 +1,64 @@ +import unittest +import tempfile, os + +from tests.common import createTestDbTable, readTestDbTable +from tol_data.enwiki.gen_img_data import getInputPageIdsFromDb, genData + +TEST_DUMP_FILE = os.path.join(os.path.dirname(__file__), 'sample_enwiki_pages_articles.xml.bz2') + +class TestGetInputPageIdsFromDb(unittest.TestCase): + def test_get(self): + with tempfile.TemporaryDirectory() as tempDir: + # Create temp tree-of-life db + dbFile = os.path.join(tempDir, 'data.db') + createTestDbTable( + dbFile, + 'CREATE TABLE wiki_ids (name TEXT PRIMARY KEY, id INT)', + 'INSERT INTO wiki_ids VALUES (?, ?)', + { + ('one', 1), + ('and another', 2), + } + ) + # Run + pageIds = getInputPageIdsFromDb(dbFile) + # Check + self.assertEqual(pageIds, {1, 2}) + +class TestGenData(unittest.TestCase): + def test_gen(self): + with 
tempfile.TemporaryDirectory() as tempDir: + # Create temp dump-index db + indexDb = os.path.join(tempDir, 'dump_index.db') + createTestDbTable( + indexDb, + 'CREATE TABLE offsets (title TEXT PRIMARY KEY, id INT UNIQUE, offset INT, next_offset INT)', + 'INSERT INTO offsets VALUES (?, ?, ?, ?)', + { + ('AccessibleComputing',10,0,-1), + ('AfghanistanHistory',13,0,-1), + ('Autism',25,0,-1), + } + ) + # Run + imgDb = os.path.join(tempDir, 'imgData.db') + genData({10, 25}, TEST_DUMP_FILE, indexDb, imgDb) + # Check + self.assertEqual( + readTestDbTable(imgDb, 'SELECT page_id, img_name from page_imgs'), + { + (10, None), + (25, 'Autism-stacking-cans 2nd edit.jpg'), + } + ) + # Run with updated page-ids set + genData({13, 10}, TEST_DUMP_FILE, indexDb, imgDb) + # Check + self.assertEqual( + readTestDbTable(imgDb, 'SELECT page_id, img_name from page_imgs'), + { + (10, None), + (13, None), + (25, 'Autism-stacking-cans 2nd edit.jpg'), + } + ) diff --git a/backend/tests/enwiki/test_gen_pageview_data.py b/backend/tests/enwiki/test_gen_pageview_data.py new file mode 100644 index 0000000..5002eb0 --- /dev/null +++ b/backend/tests/enwiki/test_gen_pageview_data.py @@ -0,0 +1,44 @@ +import unittest +import tempfile, os + +from tests.common import createTestBz2, createTestDbTable, readTestDbTable +from tol_data.enwiki.gen_pageview_data import genData + +class TestGenData(unittest.TestCase): + def test_gen(self): + with tempfile.TemporaryDirectory() as tempDir: + # Create temp pageview files + pageviewFiles = [os.path.join(tempDir, 'pageviews1.bz2'), os.path.join(tempDir, 'pageviews2.bz2')] + createTestBz2(pageviewFiles[0], ( + 'aa.wikibooks One null desktop 1 W1\n' + 'en.wikipedia Two null mobile-web 10 A9B1\n' + 'en.wikipedia Three null desktop 4 D3\n' + )) + createTestBz2(pageviewFiles[1], ( + 'fr.wikipedia Four null desktop 12 T6U6\n' + 'en.wikipedia Three null desktop 10 E4G5Z61\n' + )) + # Create temp dump-index db + dumpIndexDb = os.path.join(tempDir, 'dump_index.db') + 
createTestDbTable( + dumpIndexDb, + 'CREATE TABLE offsets (title TEXT PRIMARY KEY, id INT UNIQUE, offset INT, next_offset INT)', + 'INSERT INTO offsets VALUES (?, ?, ?, ?)', + { + ('One', 1, 0, -1), + ('Two', 2, 0, -1), + ('Three', 3, 0, -1), + ('Four', 4, 0, -1), + } + ) + # Run + dbFile = os.path.join(tempDir, 'data.db') + genData(pageviewFiles, dumpIndexDb, dbFile) + # Check + self.assertEqual( + readTestDbTable(dbFile, 'SELECT title, id, views from views'), + { + ('Two', 2, 5), + ('Three', 3, 7), + } + ) diff --git a/backend/tests/eol/__init__.py b/backend/tests/eol/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/backend/tests/eol/test_download_imgs.py b/backend/tests/eol/test_download_imgs.py new file mode 100644 index 0000000..975d1c7 --- /dev/null +++ b/backend/tests/eol/test_download_imgs.py @@ -0,0 +1,74 @@ +import unittest +from unittest.mock import Mock, patch +import tempfile, os + +from tests.common import readTestFile, createTestDbTable +from tol_data.eol.download_imgs import getEolIdsFromDb, downloadImgs + +class TestGetEolIdsFromDb(unittest.TestCase): + def test_get(self): + with tempfile.TemporaryDirectory() as tempDir: + # Create temp db + dbFile = os.path.join(tempDir, 'data.db') + createTestDbTable( + dbFile, + 'CREATE TABLE eol_ids (name TEXT PRIMARY KEY, id INT)', + 'INSERT INTO eol_ids VALUES (?, ?)', + { + ('one', 1), + ('a second', 2), + } + ) + # Run + eolIds = getEolIdsFromDb(dbFile) + # Check + self.assertEqual(eolIds, {1, 2}) + +class TestDownloadImgs(unittest.TestCase): + @patch('requests.get', autospec=True) + def test_gen(self, requestsGetMock): + requestsGetMock.side_effect = lambda url: Mock(content=('img:' + url).encode()) + with tempfile.TemporaryDirectory() as tempDir: + eolIds = {1, 2, 4} + # Create temp images-list db + imagesListDb = os.path.join(tempDir, 'images_list.db') + createTestDbTable( + imagesListDb, + 'CREATE TABLE images (content_id INT PRIMARY KEY, page_id INT, source_url TEXT,' \ + ' copy_url 
TEXT, license TEXT, copyright_owner TEXT)', + 'INSERT INTO images VALUES (?, ?, ?, ?, ?, ?)', + { + (10, 1, '???', 'https://content.eol.org/1.jpg', 'cc-by-sa', 'owner1'), + (20, 2, '', 'https://content.eol.org/2.jpg', 'cc-by', 'owner2'), + (21, 2, '', 'https://content.eol.org/2b.jpg', 'public domain', 'owner2'), + (22, 2, '', 'https://content.eol.org/2c.jpg', '???', 'owner3'), + (23, 2, '', 'data/2d.jpg', 'cc-by-nc', 'owner5'), + (24, 2, '', 'https://content.eol.org/2e', 'cc-by', 'owner6'), + (25, 2, '', 'https://content.eol.org/2f.gif', 'cc-by', 'owner7'), + (30, 3, '', 'https://content.eol.org/3.png', 'cc-by', 'owner3'), + } + ) + # Create temp output dir + with tempfile.TemporaryDirectory() as outDir: + # Run + downloadImgs(eolIds, imagesListDb, outDir) + # Check + expectedImgs1 = { + '1 10.jpg': 'img:https://content.eol.org/1.jpg', + '2 20.jpg': 'img:https://content.eol.org/2.jpg', + '2 23.jpg': 'img:https://content.eol.org/data/2d.jpg', + '2 25.gif': 'img:https://content.eol.org/2f.gif', + } + expectedImgs2 = { + '1 10.jpg': 'img:https://content.eol.org/1.jpg', + '2 21.jpg': 'img:https://content.eol.org/2b.jpg', + '2 23.jpg': 'img:https://content.eol.org/data/2d.jpg', + '2 25.gif': 'img:https://content.eol.org/2f.gif', + } + outImgSet = set(os.listdir(outDir)) + expectedImgSet1 = set(expectedImgs1.keys()) + expectedImgSet2 = set(expectedImgs2.keys()) + self.assertIn(outImgSet, (expectedImgSet1, expectedImgSet2)) + matchingImgs = expectedImgs1 if outImgSet == expectedImgSet1 else expectedImgs2 + for imgName, imgContent in matchingImgs.items(): + self.assertEqual(readTestFile(os.path.join(outDir, imgName)), imgContent) diff --git a/backend/tests/eol/test_gen_images_list_db.py b/backend/tests/eol/test_gen_images_list_db.py new file mode 100644 index 0000000..ca9b495 --- /dev/null +++ b/backend/tests/eol/test_gen_images_list_db.py @@ -0,0 +1,32 @@ +import unittest +import tempfile, os + +from tests.common import createTestFile, readTestDbTable +from 
tol_data.eol.gen_images_list_db import genData + +class TestGenData(unittest.TestCase): + def test_gen(self): + with tempfile.TemporaryDirectory() as tempDir: + # Create temp images-list files + imageListsGlob = os.path.join(tempDir, 'imgs-*.csv') + createTestFile(os.path.join(tempDir, 'imgs-1.csv'), ( + 'EOL content ID,EOL page ID,Medium Source URL,EOL Full-Size Copy URL,License Name,Copyright Owner\n' + '1,10,https://example.com/1/,https://content.eol.org/1.jpg,cc-by,owner1\n' + '2,20,https://example2.com/2/,https://content.eol.org/2.jpg,cc-by-sa,owner2\n' + )) + createTestFile(os.path.join(tempDir, 'imgs-2.csv'), ( + '3,30,https://example.com/3/,https://content.eol.org/3.png,public,owner3\n' + )) + # Run + dbFile = os.path.join(tempDir, 'imagesList.db') + genData(imageListsGlob, dbFile) + # Check + self.assertEqual( + readTestDbTable( + dbFile, 'SELECT content_id, page_id, source_url, copy_url, license, copyright_owner from images'), + { + (1, 10, 'https://example.com/1/', 'https://content.eol.org/1.jpg', 'cc-by', 'owner1'), + (2, 20, 'https://example2.com/2/', 'https://content.eol.org/2.jpg', 'cc-by-sa', 'owner2'), + (3, 30, 'https://example.com/3/', 'https://content.eol.org/3.png', 'public', 'owner3'), + } + ) diff --git a/backend/tests/eol/test_review_imgs.py b/backend/tests/eol/test_review_imgs.py new file mode 100644 index 0000000..49c09bb --- /dev/null +++ b/backend/tests/eol/test_review_imgs.py @@ -0,0 +1,46 @@ +import unittest +import tempfile, os, shutil + +from tests.common import createTestDbTable +from tol_data.eol.review_imgs import reviewImgs + +CLICK_IMG = os.path.join(os.path.dirname(__file__), '..', 'green.png') +AVOID_IMG = os.path.join(os.path.dirname(__file__), '..', 'red.png') + +class TestReviewImgs(unittest.TestCase): + def test_review(self): + with tempfile.TemporaryDirectory() as tempDir: + # Create input images + imgDir = os.path.join(tempDir, 'imgs_for_review') + os.mkdir(imgDir) + shutil.copy(CLICK_IMG, os.path.join(imgDir, '1 
10.jpg')) + shutil.copy(CLICK_IMG, os.path.join(imgDir, '2 20.jpeg')) + shutil.copy(AVOID_IMG, os.path.join(imgDir, '2 21.gif')) + shutil.copy(AVOID_IMG, os.path.join(imgDir, '2 22.jpg')) + shutil.copy(AVOID_IMG, os.path.join(imgDir, '3 30.png')) + shutil.copy(AVOID_IMG, os.path.join(imgDir, '3 31.jpg')) + # Create temp extra-info db + extraInfoDb = os.path.join(tempDir, 'data.db') + createTestDbTable( + extraInfoDb, + 'CREATE TABLE eol_ids (name TEXT PRIMARY KEY, id INT)', + 'INSERT INTO eol_ids VALUES (?, ?)', + { + ('one', 1), + ('two', 2), + ('three', 3), + } + ) + createTestDbTable( + extraInfoDb, + 'CREATE TABLE names(name TEXT, alt_name TEXT, pref_alt INT, src TEXT, PRIMARY KEY(name, alt_name))', + 'INSERT OR IGNORE INTO names VALUES (?, ?, ?, ?)', + { + ('two','II',1,'eol'), + } + ) + # Run + outDir = os.path.join(tempDir, 'imgs') + reviewImgs(imgDir, outDir, extraInfoDb) + # Check + self.assertEqual(set(os.listdir(outDir)), {'1 10.jpg', '2 20.jpeg'}) diff --git a/backend/tests/green.png b/backend/tests/green.png new file mode 100644 index 0000000..d4f15c9 Binary files /dev/null and b/backend/tests/green.png differ diff --git a/backend/tests/red.png b/backend/tests/red.png new file mode 100644 index 0000000..7828e96 Binary files /dev/null and b/backend/tests/red.png differ diff --git a/backend/tests/test_gen_desc_data.py b/backend/tests/test_gen_desc_data.py new file mode 100644 index 0000000..cc0582d --- /dev/null +++ b/backend/tests/test_gen_desc_data.py @@ -0,0 +1,101 @@ +import unittest +import tempfile, os + +from tests.common import createTestDbTable, readTestDbTable +from tol_data.gen_desc_data import genData + +class TestGenData(unittest.TestCase): + def test_gen(self): + with tempfile.TemporaryDirectory() as tempDir: + # Create temp dbpedia db + dbpediaDb = os.path.join(tempDir, 'dbp_descs.db') + createTestDbTable( + dbpediaDb, + 'CREATE TABLE ids (iri TEXT PRIMARY KEY, id INT)', + 'INSERT INTO ids VALUES (?, ?)', + { + ('', 1), + ('', 2), + ('', 
3), + } + ) + createTestDbTable( + dbpediaDb, + 'CREATE TABLE redirects (iri TEXT PRIMARY KEY, target TEXT)', + 'INSERT INTO redirects VALUES (?, ?)', + { + ('', ''), + } + ) + createTestDbTable( + dbpediaDb, + 'CREATE TABLE abstracts (iri TEXT PRIMARY KEY, abstract TEXT)', + 'INSERT INTO abstracts VALUES (?, ?)', + { + ('', 'One from dbp'), + ('', 'Two from dbp'), + ('', 'Three from dbp'), + } + ) + # Create temp enwiki db + enwikiDb = os.path.join(tempDir, 'enwiki_descs.db') + createTestDbTable( + enwikiDb, + 'CREATE TABLE pages (id INT PRIMARY KEY, title TEXT UNIQUE)', + 'INSERT INTO pages VALUES (?, ?)', + { + (1, 'I'), + (3, 'III'), + (4, 'IV'), + (5, 'V'), + (6, 'VI'), + } + ) + createTestDbTable( + enwikiDb, + 'CREATE TABLE redirects (id INT PRIMARY KEY, target TEXT)', + 'INSERT INTO redirects VALUES (?, ?)', + { + (5, 'IV'), + } + ) + createTestDbTable( + enwikiDb, + 'CREATE TABLE descs (id INT PRIMARY KEY, desc TEXT)', + 'INSERT INTO descs VALUES (?, ?)', + { + (1, 'One from enwiki'), + (3, 'Three from enwiki'), + (4, 'Four from enwiki'), + (5, 'Five from enwiki'), + } + ) + # Create temp tree-of-life db + dbFile = os.path.join(tempDir, 'data.db') + createTestDbTable( + dbFile, + 'CREATE TABLE wiki_ids (name TEXT PRIMARY KEY, id INT)', + 'INSERT INTO wiki_ids VALUES (?, ?)', + { + ('first', 1), + ('second', 2), + ('third', 3), + ('fourth', 4), + ('fifth', 5), + ('sixth', 6), + ('seventh', 7), + } + ) + # Run + genData(dbpediaDb, enwikiDb, dbFile) + # Check + self.assertEqual( + readTestDbTable(dbFile, 'SELECT wiki_id, desc, from_dbp from descs'), + { + (1, 'One from dbp', 1), + (2, 'Three from dbp', 1), + (3, 'Three from dbp', 1), + (4, 'Four from enwiki', 0), + (5, 'Four from enwiki', 0), + } + ) diff --git a/backend/tests/test_gen_imgs.py b/backend/tests/test_gen_imgs.py new file mode 100644 index 0000000..1ddd438 --- /dev/null +++ b/backend/tests/test_gen_imgs.py @@ -0,0 +1,125 @@ +import unittest +from unittest.mock import patch +import tempfile, os, 
shutil + +from tests.common import createTestFile, createTestDbTable, readTestDbTable +from tol_data.gen_imgs import genImgs + +TEST_IMG = os.path.join(os.path.dirname(__file__), 'green.png') + +class TestGenImgs(unittest.TestCase): + @patch('tol_data.gen_imgs.convertImage', autospec=True) + def test_gen(self, convertImageMock): + with tempfile.TemporaryDirectory() as tempDir: + convertImageMock.side_effect = \ + lambda imgPath, outPath: shutil.copy(imgPath, outPath) + # Create temp EOL images + eolImgDir = os.path.join(tempDir, 'eol_imgs') + os.mkdir(eolImgDir) + shutil.copy(TEST_IMG, os.path.join(eolImgDir, '1 10.jpg')) + shutil.copy(TEST_IMG, os.path.join(eolImgDir, '2 20.png')) + shutil.copy(TEST_IMG, os.path.join(eolImgDir, '5 50.jpg')) + # Create temp EOL image db + eolImgDb = os.path.join(tempDir, 'eol_imgs.db') + createTestDbTable( + eolImgDb, + 'CREATE TABLE images (content_id INT PRIMARY KEY, page_id INT, source_url TEXT,' \ + ' copy_url TEXT, license TEXT, copyright_owner TEXT)', + 'INSERT INTO images VALUES (?, ?, ?, ?, ?, ?)', + { + (10, 1, 'https://example.com/1.jpg', '', 'cc-by', 'eol owner1'), + (20, 2, 'https://example.com/2.png', '', 'cc-by-sa', 'eol owner2'), + (50, 5, 'https://example.com/5.jpg', '', 'cc-by-sa', 'eol owner3'), + } + ) + # Create temp enwiki images + enwikiImgDir = os.path.join(tempDir, 'enwiki_imgs') + os.mkdir(enwikiImgDir) + shutil.copy(TEST_IMG, os.path.join(enwikiImgDir, '100.jpg')) + shutil.copy(TEST_IMG, os.path.join(enwikiImgDir, '200.jpeg')) + shutil.copy(TEST_IMG, os.path.join(enwikiImgDir, '400.png')) + # Create temp enwiki image db + enwikiImgDb = os.path.join(tempDir, 'enwiki_imgs.db') + createTestDbTable( + enwikiImgDb, + 'CREATE TABLE page_imgs (page_id INT PRIMARY KEY, img_name TEXT)', + 'INSERT INTO page_imgs VALUES (?, ?)', + { + (100, 'one.jpg'), + (200, 'two.jpeg'), + (300, 'two.jpeg'), + (400, 'two.jpeg'), + } + ) + createTestDbTable( + enwikiImgDb, + 'CREATE TABLE imgs (' \ + 'name TEXT PRIMARY KEY, license 
TEXT, artist TEXT, credit TEXT, restrictions TEXT, url TEXT)', + 'INSERT INTO imgs VALUES (?, ?, ?, ?, ?, ?)', + { + ('one.jpg', 'CC BY-SA 3.0', 'author1', 'credits1', '', 'https://upload.wikimedia.org/one.jpg'), + ('two.jpeg', 'cc-by', 'author2', 'credits2', '', 'https://upload.wikimedia.org/two.jpeg'), + ('four.png', 'cc0', 'author3', '', '', 'https://upload.wikimedia.org/x.png'), + } + ) + # Create temp picked-images file + pickedImgsFile = os.path.join(tempDir, 'img_data.txt') + createTestFile(pickedImgsFile, ( + 'node5.jpg|url1|cc-by-sa 4.0|artist1|credit1\n' + )) + # Create temp picked-images + pickedImgDir = os.path.join(tempDir, 'picked_imgs') + os.mkdir(pickedImgDir) + shutil.copy(TEST_IMG, os.path.join(pickedImgDir, 'node5.jpg')) + # Create temp img-list file + imgListFile = os.path.join(tempDir, 'img_list.txt') + createTestFile(imgListFile, ( + 'ott1 ' + os.path.join(eolImgDir, '1 10.jpg') + '\n' + 'ott2 ' + os.path.join(enwikiImgDir, '200.jpeg') + '\n' + 'ott3\n' + 'ott4 ' + os.path.join(enwikiImgDir, '400.png') + '\n' + 'ott5 ' + os.path.join(eolImgDir, '5 50.jpg') + '\n' + )) + # Create temp tree-of-life db + dbFile = os.path.join(tempDir, 'data.db') + createTestDbTable( + dbFile, + 'CREATE TABLE nodes (name TEXT PRIMARY KEY, id TEXT UNIQUE, tips INT)', + 'INSERT INTO nodes VALUES (?, ?, ?)', + { + ('node1', 'ott1', 1), + ('node2', 'ott2', 1), + ('node3', 'ott3', 2), + ('node4', 'ott4', 4), + ('node5', 'ott5', 1), + ('node6', 'ott6', 10), + } + ) + # Run + outDir = os.path.join(tempDir, 'img') + genImgs(imgListFile, eolImgDir, outDir, eolImgDb, enwikiImgDb, pickedImgDir, pickedImgsFile, dbFile) + # Check + self.assertEqual(set(os.listdir(outDir)), { + 'ott1.jpg', + 'ott2.jpg', + 'ott4.jpg', + 'ott5.jpg', + }) + self.assertEqual( + readTestDbTable(dbFile, 'SELECT name, img_id, src from node_imgs'), + { + ('node1', 1, 'eol'), + ('node2', 200, 'enwiki'), + ('node4', 400, 'enwiki'), + ('node5', 1, 'picked'), + } + ) + self.assertEqual( + 
readTestDbTable(dbFile, 'SELECT id, src, url, license, artist, credit from images'), + { + (1, 'eol', 'https://example.com/1.jpg', 'cc-by', 'eol owner1', ''), + (200, 'enwiki', 'https://en.wikipedia.org/wiki/File:two.jpeg', 'cc-by', 'author2', 'credits2'), + (400, 'enwiki', 'https://en.wikipedia.org/wiki/File:two.jpeg', 'cc-by', 'author2', 'credits2'), + (1, 'picked', 'url1', 'cc-by-sa 4.0', 'artist1', 'credit1'), + } + ) diff --git a/backend/tests/test_gen_linked_imgs.py b/backend/tests/test_gen_linked_imgs.py new file mode 100644 index 0000000..b989407 --- /dev/null +++ b/backend/tests/test_gen_linked_imgs.py @@ -0,0 +1,84 @@ +import unittest +import tempfile, os + +from tests.common import createTestDbTable, readTestDbTable +from tol_data.gen_linked_imgs import genData + +class TestGenData(unittest.TestCase): + def test_gen(self): + with tempfile.TemporaryDirectory() as tempDir: + # Create temp tree-of-life db + # Test tree ('I' means a node has an image): + # one -> two -> sixI + # -> seven + # -> eight + # -> threeI + # -> [nine + ten] -> nineI + # -> ten + # -> fiveI -> [twelve + thirteen] -> twelveI + # -> thirteenI + dbFile = os.path.join(tempDir, 'data.db') + createTestDbTable( + dbFile, + 'CREATE TABLE nodes (name TEXT PRIMARY KEY, id TEXT UNIQUE, tips INT)', + 'INSERT INTO nodes VALUES (?, ?, ?)', + { + ('one', 'ott1', 8), + ('two', 'ott2', 3), + ('three', 'ott3', 1), + ('[nine + ten]', 'ott4', 2), + ('five', 'ott5', 2), + ('six', 'ott6', 1), + ('seven', 'ott7', 1), + ('eight', 'ott8', 1), + ('nine', 'ott9', 1), + ('ten', 'ott10', 1), + ('[twelve + thirteen]', 'ott11', 2), + ('twelve', 'ott12', 1), + ('thirteen', 'ott13', 1), + } + ) + createTestDbTable( + dbFile, + 'CREATE TABLE edges (parent TEXT, child TEXT, p_support INT, PRIMARY KEY (parent, child))', + 'INSERT INTO edges VALUES (?, ?, ?)', + { + ('one', 'two', 1), + ('one', 'three', 1), + ('one', '[nine + ten]', 0), + ('one', 'five', 1), + ('two', 'six', 1), + ('two', 'seven', 1), + ('two', 
'eight', 0), + ('[nine + ten]', 'nine', 0), + ('[nine + ten]', 'ten', 1), + ('five', '[twelve + thirteen]', 1), + ('[twelve + thirteen]', 'twelve', 1), + ('[twelve + thirteen]', 'thirteen', 0), + } + ) + createTestDbTable( + dbFile, + 'CREATE TABLE node_imgs (name TEXT PRIMARY KEY, img_id INT, src TEXT)', + 'INSERT INTO node_imgs VALUES (?, ?, ?)', + { + ('six', 1, 'eol'), + ('three', 10, 'enwiki'), + ('nine', 1, 'picked'), + ('five', 2, 'eol'), + ('twelve', 11, 'enwiki'), + ('thirteen', 12, 'enwiki'), + } + ) + # Run + genData(dbFile) + # Check + self.assertEqual( + readTestDbTable(dbFile, 'SELECT name, otol_ids from linked_imgs'), + { + ('one', 'ott6'), + ('two', 'ott6'), + ('[nine + ten]', 'ott9,'), + ('[twelve + thirteen]', 'ott12,ott13'), + } + ) diff --git a/backend/tests/test_gen_mapping_data.py b/backend/tests/test_gen_mapping_data.py new file mode 100644 index 0000000..9aa99b7 --- /dev/null +++ b/backend/tests/test_gen_mapping_data.py @@ -0,0 +1,302 @@ +import unittest +import tempfile, os + +from tests.common import createTestFile, createTestGzip, createTestDbTable, readTestDbTable +from tol_data.gen_mapping_data import \ + genData, readTaxonomyFile, readEolIdsFile, readWikidataDb, readPickedMappings, getEnwikiPageIds + +class TestReadTaxonomyFile(unittest.TestCase): + def test_read(self): + with tempfile.TemporaryDirectory() as tempDir: + # Create temp taxonomy file + taxonomyFile = os.path.join(tempDir, 'taxonomy.tsv') + SEP = '\t|\t' + createTestFile(taxonomyFile, ''.join([ + SEP.join(['uid', 'parent_uid', 'name', 'rank', 'sourceinfo', 'uniqueName', 'flags', '\n']), + SEP.join(['1', '2', 'one', 'species', 'ncbi:10', '', '', '\n']), + SEP.join(['2', '3', 'two', 'genus', 'ncbi:20,gbif:1', 'bananas', '', '\n']), + SEP.join(['10', '20', 'ten', 'family', 'if:10,if:100', '', '', '\n']), + SEP.join(['11', '100', 'eleven', '', 'igloo:1,ncbi:?', '', '', '\n']) + ])) + # Run + nodeToSrcIds = {} + usedSrcIds = set() + readTaxonomyFile(taxonomyFile, nodeToSrcIds, 
usedSrcIds) + # Check + self.assertEqual(nodeToSrcIds, { + 1: {'ncbi': 10}, + 2: {'ncbi': 20, 'gbif': 1}, + 10: {'if': 10}, + }) + self.assertEqual(usedSrcIds, { + ('ncbi', 10), + ('ncbi', 20), + ('gbif', 1), + ('if', 10) + }) +class TestReadEolIdsFile(unittest.TestCase): + def test_read(self): + with tempfile.TemporaryDirectory() as tempDir: + # Create temp EOL IDs file + eolIdsFile = os.path.join(tempDir, 'ids.csv.gz') + createTestGzip(eolIdsFile, ( + 'node_id,resource_pk,resource_id,page_id,preferred_canonical_for_page\n' + '0,10,676,1,rhubarb\n' # EOL ID 1 with ncbi ID 10 + '0,99,767,2,nothing\n' # EOL ID 2 with worms ID 99 + '0,234,459,100,goat\n' # EOL ID 100 with gbif ID 234 + '0,23,676,101,lemon\n' # EOL ID 101 with ncbi ID 23 + )) + # Create input maps + nodeToSrcIds = { + 10: {'ncbi': 10}, + 20: {'ncbi': 23, 'gbif': 234} + } + # Run + usedSrcIds = {('ncbi', 10), ('gbif', 234), ('ncbi', 23)} + nodeToEolId = {} + readEolIdsFile(eolIdsFile, nodeToSrcIds, usedSrcIds, nodeToEolId) + # Check + self.assertEqual(nodeToEolId, { + 10: 1, + 20: 101, + }) +class TestReadWikidataDb(unittest.TestCase): + def test_read(self): + with tempfile.TemporaryDirectory() as tempDir: + # Create temp wikidata db + wikidataDb = os.path.join(tempDir, 'taxon_srcs.db') + createTestDbTable( + wikidataDb, + 'CREATE TABLE src_id_to_title (src TEXT, id INT, title TEXT, PRIMARY KEY(src, id))', + 'INSERT INTO src_id_to_title VALUES (?, ?, ?)', + [ + ('ncbi', 1, 'one'), + ('ncbi', 11, 'two'), + ('gbif', 21, 'three'), + ('if', 31, 'three'), + ('ncbi', 2, 'four'), + ('gbif', 1, 'five'), + ('eol', 1, 'one'), + ('eol', 2, 'three'), + ('ncbi', 100, 'six'), + ] + ) + createTestDbTable( + wikidataDb, + 'CREATE TABLE title_iucn (title TEXT PRIMARY KEY, status TEXT)', + 'INSERT INTO title_iucn VALUES (?, ?)', + [ + ('one', 'least concern'), + ('three', 'vulnerable'), + ('six', 'extinct in the wild'), + ] + ) + # Create input maps + nodeToSrcIds = { + 10: {'ncbi': 1}, + 20: {'ncbi': 11, 'gbif': 21, 
'if': 31}, + 30: {'ncbi': 2, 'gbif': 1}, + 40: {'ncbi': 99}, + } + usedSrcIds = { + ('ncbi', 1), ('ncbi', 2), ('gbif', 1), ('ncbi', 11), ('gbif', 21), ('if', 31), + ('eol', 10), ('ncbi', 99) + } + nodeToEolId = { + 20: 100, + } + # Run + nodeToWikiTitle = {} + titleToIucnStatus = {} + readWikidataDb(wikidataDb, nodeToSrcIds, usedSrcIds, nodeToWikiTitle, titleToIucnStatus, nodeToEolId) + # Check + self.assertEqual(nodeToWikiTitle, { + 10: 'one', + 20: 'three', + 30: 'four', + }) + self.assertEqual(titleToIucnStatus, { + 'one': 'least concern', + 'three': 'vulnerable', + }) + self.assertEqual(nodeToEolId, { + 10: 1, + 20: 100, + }) +class TestReadPickedMappings(unittest.TestCase): + def test_read(self): + with tempfile.TemporaryDirectory() as tempDir: + # Create temp picked-mappings files + pickedMappings = {'eol': ['1.txt'], 'enwiki': ['2.txt', '3.txt']} + pickedMappingsContent = {'eol': [''], 'enwiki': ['', '']} + pickedMappingsContent['eol'][0] = ( + '10|100\n' + '20|202\n' + ) + pickedMappingsContent['enwiki'][0] = ( + '12|abc\n' + '23|def\n' + ) + pickedMappingsContent['enwiki'][1] = ( + '15|ghi\n' + '35|jkl\n' + ) + for src in pickedMappings: + for idx in range(len(pickedMappings[src])): + pickedMappings[src][idx] = os.path.join(tempDir, pickedMappings[src][idx]) + createTestFile(pickedMappings[src][idx], pickedMappingsContent[src][idx]) + # Create input maps + nodeToEolId = { + 1: 1, + 10: 66, + } + nodeToWikiTitle = { + 10: 'one', + 12: 'two', + 35: 'goanna', + } + # Run + readPickedMappings(pickedMappings, nodeToEolId, nodeToWikiTitle) + # Check + self.assertEqual(nodeToEolId, { + 1: 1, + 10: 100, + 20: 202, + }) + self.assertEqual(nodeToWikiTitle, { + 10: 'one', + 12: 'abc', + 23: 'def', + 15: 'ghi', + 35: 'jkl', + }) +class TestReadGetEnwikiPageIds(unittest.TestCase): + def test_read(self): + with tempfile.TemporaryDirectory() as tempDir: + # Create temp dump index + dumpIndexDb = os.path.join(tempDir, 'dump_index.db') + createTestDbTable( + dumpIndexDb, + 
'CREATE TABLE offsets (title TEXT PRIMARY KEY, id INT UNIQUE, offset INT, next_offset INT)', + 'INSERT INTO offsets VALUES (?, ?, ?, ?)', + [ + ('one', 1, 10, 100), + ('two', 22, 10, 100), + ('four', 3, 1000, 2000), + ] + ) + # Create input maps + nodeToWikiTitle = { + 10: 'one', + 20: 'two', + 30: 'three', + } + # Run + titleToPageId = {} + getEnwikiPageIds(dumpIndexDb, nodeToWikiTitle, titleToPageId) + # Check + self.assertEqual(titleToPageId, { + 'one': 1, + 'two': 22, + }) +class TestGenData(unittest.TestCase): + def test_mapping(self): + with tempfile.TemporaryDirectory() as tempDir: + # Create temp taxonomy file + taxonomyFile = os.path.join(tempDir, 'taxonomy.tsv') + SEP = '\t|\t' + createTestFile(taxonomyFile, ''.join([ + SEP.join(['uid', 'parent_uid', 'name', 'rank', 'sourceinfo', 'uniqueName', 'flags', '\n']), + SEP.join(['1', '', '', '', 'ncbi:10', '', '', '\n']), + SEP.join(['2', '', '', '', 'ncbi:20,gbif:1', '', '', '\n']), + SEP.join(['3', '', '', '', 'ncbi:30,if:2', '', '', '\n']), + ])) + # Create temp EOL IDs file + eolIdsFile = os.path.join(tempDir, 'ids.csv.gz') + createTestGzip(eolIdsFile, ( + 'node_id,resource_pk,resource_id,page_id,preferred_canonical_for_page\n' + '0,10,676,1,\n' # EOL ID 1 with ncbi ID 10 + '0,30,676,2,\n' # EOL ID 2 with ncbi ID 30 + )) + # Create temp wikidata db + wikidataDb = os.path.join(tempDir, 'taxon_srcs.db') + createTestDbTable( + wikidataDb, + 'CREATE TABLE src_id_to_title (src TEXT, id INT, title TEXT, PRIMARY KEY(src, id))', + 'INSERT INTO src_id_to_title VALUES (?, ?, ?)', + [ + ('ncbi', 10, 'one'), + ('gbif', 1, 'two'), + ('eol', 100, 'two'), + ('if', 2, 'three'), + ] + ) + createTestDbTable( + wikidataDb, + 'CREATE TABLE title_iucn (title TEXT PRIMARY KEY, status TEXT)', + 'INSERT INTO title_iucn VALUES (?, ?)', + [ + ('one', 'least concern'), + ('three', 'vulnerable'), + ] + ) + # Create temp picked-mappings files + pickedMappings = {'eol': [], 'enwiki': ['w_ids.txt']} + pickedMappingsContent = {'eol': [], 
'enwiki': ['']} + pickedMappingsContent['enwiki'][0] = ( + '3|four\n' + ) + for src in pickedMappings: + for idx in range(len(pickedMappings[src])): + pickedMappings[src][idx] = os.path.join(tempDir, pickedMappings[src][idx]) + createTestFile(pickedMappings[src][idx], pickedMappingsContent[src][idx]) + # Create temp dump index + dumpIndexDb = os.path.join(tempDir, 'dump_index.db') + createTestDbTable( + dumpIndexDb, + 'CREATE TABLE offsets (title TEXT PRIMARY KEY, id INT UNIQUE, offset INT, next_offset INT)', + 'INSERT INTO offsets VALUES (?, ?, ?, ?)', + [ + ('one', 1000, 1, 2), + ('two', 2000, 1, 2), + ('three', 3000, 1, 2), + ('four', 4000, 1, 2), + ] + ) + # Create temp tree-of-life db + dbFile = os.path.join(tempDir, 'data.db') + createTestDbTable( + dbFile, + 'CREATE TABLE nodes (name TEXT PRIMARY KEY, id TEXT UNIQUE, tips INT)', + 'INSERT INTO nodes VALUES (?, ?, ?)', + [ + ('first', 'ott1', 10), + ('second', 'ott2', 1), + ('third', 'ott3', 2), + ] + ) + # Run + genData(taxonomyFile, eolIdsFile, wikidataDb, pickedMappings, dumpIndexDb, dbFile) + # Check + self.assertEqual( + readTestDbTable(dbFile, 'SELECT name, id from eol_ids'), + { + ('first', 1), + ('second', 100), + ('third', 2), + } + ) + self.assertEqual( + readTestDbTable(dbFile, 'SELECT name, id from wiki_ids'), + { + ('first', 1000), + ('second', 2000), + ('third', 4000), + } + ) + self.assertEqual( + readTestDbTable(dbFile, 'SELECT name, iucn from node_iucn'), + { + ('first', 'least concern'), + } + ) diff --git a/backend/tests/test_gen_name_data.py b/backend/tests/test_gen_name_data.py new file mode 100644 index 0000000..85e81d8 --- /dev/null +++ b/backend/tests/test_gen_name_data.py @@ -0,0 +1,93 @@ +import unittest +import tempfile, os + +from tests.common import createTestFile, createTestDbTable, readTestDbTable +from tol_data.gen_name_data import genData + +class TestGenData(unittest.TestCase): + def test_gen(self): + with tempfile.TemporaryDirectory() as tempDir: + # Create temp eol names 
file + eolNamesFile = os.path.join(tempDir, 'vernacular_names.csv') + createTestFile(eolNamesFile, ( + 'page_id,,vernacular_string,language_code,,,is_preferred_by_eol\n' + '10,,cat,eng,,,preferred\n' + '10,,kitty,eng,,,\n' + '20,,apple,eng,,,preferred\n' + '20,,pomme,fr,,,preferred\n' + '20,,apples,eng,,,\n' + '30,,those things with wings,eng,,,\n' + )) + # Create temp enwiki db + enwikiDb = os.path.join(tempDir, 'desc_data.db') + createTestDbTable( + enwikiDb, + 'CREATE TABLE pages (id INT PRIMARY KEY, title TEXT UNIQUE)', + 'INSERT INTO pages VALUES (?, ?)', + [ + (1, 'abc'), + (2, 'def'), + (3, 'ghi'), + ] + ) + createTestDbTable( + enwikiDb, + 'CREATE TABLE redirects (id INT PRIMARY KEY, target TEXT)', + 'INSERT INTO redirects VALUES (?, ?)', + [ + (3, 'abc'), + (4, 'def'), + ] + ) + # Create temp picked-names file + pickedNamesFile = os.path.join(tempDir, 'picked_names.txt') + createTestFile(pickedNamesFile, ( + 'three|xxx|1\n' + 'one|kitty|\n' + 'two|two|\n' + )) + # Create temp db + dbFile = os.path.join(tempDir, 'data.db') + createTestDbTable( + dbFile, + 'CREATE TABLE nodes (name TEXT PRIMARY KEY, id TEXT UNIQUE, tips INT)', + 'INSERT INTO nodes VALUES (?, ?, ?)', + [ + ('one', 'ott1', 1), + ('two', 'ott2', 1), + ('three', 'ott3', 1), + ] + ) + createTestDbTable( + dbFile, + 'CREATE TABLE eol_ids (name TEXT PRIMARY KEY, id INT)', + 'INSERT INTO eol_ids VALUES (?, ?)', + [ + ('one', 10), + ('two', 20), + ('three', 30), + ] + ) + createTestDbTable( + dbFile, + 'CREATE TABLE wiki_ids (name TEXT PRIMARY KEY, id INT)', + 'INSERT INTO wiki_ids VALUES (?, ?)', + [ + ('one', 1), + ('two', 3), + ('three', 2), + ] + ) + # Run + genData(eolNamesFile, enwikiDb, pickedNamesFile, dbFile) + # Check + self.assertEqual( + readTestDbTable(dbFile, 'SELECT name, alt_name, pref_alt, src FROM names'), + { + ('one', 'cat', 1, 'eol'), + ('one', 'ghi', 0, 'enwiki'), + ('two', 'apple', 0, 'eol'), + ('two', 'apples', 0, 'eol'), + ('three', 'xxx', 1, 'picked'), + } + ) diff --git 
a/backend/tests/test_gen_otol_data.py b/backend/tests/test_gen_otol_data.py new file mode 100644 index 0000000..25e65e3 --- /dev/null +++ b/backend/tests/test_gen_otol_data.py @@ -0,0 +1,118 @@ +import unittest +import tempfile, os + +from tests.common import createTestFile, readTestDbTable +from tol_data.gen_otol_data import genData + +def runGenData(treeFileContents: str, annFileContents: str, pickedFileContents: str): + """ Sets up files to be read by genData(), runs it, reads the output database, and returns node+edge info """ + with tempfile.TemporaryDirectory() as tempDir: + # Create temp tree file + treeFile = os.path.join(tempDir, 'tree.tre') + createTestFile(treeFile, treeFileContents) + # Create temp annotations file + annFile = os.path.join(tempDir, 'ann.json') + createTestFile(annFile, annFileContents) + # Create temp picked names file + pickedFile = os.path.join(tempDir, 'pn.txt') + createTestFile(pickedFile, pickedFileContents) + # Run genData() + dbFile = os.path.join(tempDir, 'data.db') + genData(treeFile, annFile, pickedFile, dbFile) + # Read database + nodes = readTestDbTable(dbFile, 'SELECT name, id, tips FROM nodes') + edges = readTestDbTable(dbFile, 'SELECT parent, child, p_support FROM edges') + return nodes, edges + +class TestGenData(unittest.TestCase): + def setUp(self): + self.maxDiff = None # Remove output-diff size limit + def test_newick(self): + treeFileContents = """ + ( + 'land plants ott2', + ( + 'TRAVELLER''s tree ott100', + (domestic_banana_ott4, (lemon_ott6, orange_ott7)citrus_ott5)mrcaott4ott5 + ) mrcaott100ott4, + 'Highly Unu2u8| name!! 
ott999', + 'citrus ott230' + )cellular_organisms_ott1;""" + annFileContents = '{"nodes": {}}' + pickedFileContents = '' + nodes, edges = runGenData(treeFileContents, annFileContents, pickedFileContents) + self.assertEqual(nodes, { + ('land plants', 'ott2', 1), + ('traveller\'s tree', 'ott100', 1), + ('domestic banana', 'ott4', 1), + ('lemon', 'ott6', 1), + ('orange', 'ott7', 1), + ('citrus', 'ott5', 2), + ('[citrus + domestic banana]', 'mrcaott4ott5', 3), + ('[citrus + traveller\'s tree]', 'mrcaott100ott4', 4), + ('highly unu2u8| name!! ', 'ott999', 1), + ('citrus [2]', 'ott230', 1), + ('cellular organisms', 'ott1', 7), + }) + self.assertEqual(edges, { + ('cellular organisms', 'land plants', 0), + ('cellular organisms', '[citrus + traveller\'s tree]', 0), + ('cellular organisms', 'highly unu2u8| name!! ', 0), + ('cellular organisms', 'citrus [2]', 0), + ('[citrus + traveller\'s tree]', 'traveller\'s tree', 0), + ('[citrus + traveller\'s tree]', '[citrus + domestic banana]', 0), + ('[citrus + domestic banana]', 'domestic banana', 0), + ('[citrus + domestic banana]', 'citrus', 0), + ('citrus', 'lemon', 0), + ('citrus', 'orange', 0), + }) + def test_newick_invalid(self): + with self.assertRaises(Exception): + runGenData('(A,B,(C,D));', '{"nodes": {}}', '') + def test_annotations(self): + treeFileContents = '(two_ott2, three_ott3, four_ott4)one_ott1;' + annFileContents = """ + { + "date_completed": "xxx", + "nodes": { + "ott3": { + "supported_by": { + "tree1": "node1" + } + }, + "ott4": { + "supported_by": { + "tree1": "node2", + "tree2": "node100" + }, + "conflicts_with": { + "tree3": ["x", "y"] + } + } + } + }""" + nodes, edges = runGenData(treeFileContents, annFileContents, '') + self.assertEqual(nodes, { + ('one', 'ott1', 3), + ('two', 'ott2', 1), + ('three', 'ott3', 1), + ('four', 'ott4', 1), + }) + self.assertEqual(edges, { + ('one', 'two', 0), + ('one', 'three', 1), + ('one', 'four', 0), + }) + def test_picked_names_file(self): + treeFileContents = '(one_ott2, 
two_ott3)one_ott1;' + pickedFileContents = 'one|ott2' + nodes, edges = runGenData(treeFileContents, '{"nodes": {}}', pickedFileContents) + self.assertEqual(nodes, { + ('one [2]', 'ott1', 2), + ('one', 'ott2', 1), + ('two', 'ott3', 1), + }) + self.assertEqual(edges, { + ('one [2]', 'one', 0), + ('one [2]', 'two', 0), + }) diff --git a/backend/tests/test_gen_pop_data.py b/backend/tests/test_gen_pop_data.py new file mode 100644 index 0000000..dd1cb22 --- /dev/null +++ b/backend/tests/test_gen_pop_data.py @@ -0,0 +1,42 @@ +import unittest +import tempfile, os + +from tests.common import createTestDbTable, readTestDbTable +from tol_data.gen_pop_data import genData + +class TestGenData(unittest.TestCase): + def test_gen(self): + with tempfile.TemporaryDirectory() as tempDir: + # Create temp pageviews db + pageviewsDb = os.path.join(tempDir, 'pageview_data.db') + createTestDbTable( + pageviewsDb, + 'CREATE TABLE views (title TEXT PRIMARY KEY, id INT, views INT)', + 'INSERT INTO views VALUES (?, ?, ?)', + { + ('one', 1, 10), + ('two', 2, 20), + ('three', 3, 30), + } + ) + # Create temp tree-of-life db + dbFile = os.path.join(tempDir, 'data.db') + createTestDbTable( + dbFile, + 'CREATE TABLE wiki_ids (name TEXT PRIMARY KEY, id INT)', + 'INSERT INTO wiki_ids VALUES (?, ?)', + { + ('node1', 1), + ('node3', 3), + } + ) + # Run + genData(pageviewsDb, dbFile) + # Check + self.assertEqual( + readTestDbTable(dbFile, 'SELECT name, pop from node_pop'), + { + ('node1', 10), + ('node3', 30) + } + ) diff --git a/backend/tests/test_gen_reduced_trees.py b/backend/tests/test_gen_reduced_trees.py new file mode 100644 index 0000000..2ae4dfd --- /dev/null +++ b/backend/tests/test_gen_reduced_trees.py @@ -0,0 +1,166 @@ +import unittest +import tempfile, os + +from tests.common import createTestFile, createTestDbTable, readTestDbTable +from tol_data.gen_reduced_trees import genData + +class TestGenData(unittest.TestCase): + def test_gen(self): + with tempfile.TemporaryDirectory() as tempDir: + 
# Create temp tree-of-life db + # Test tree (P/I/L/D means picked/image/linked_image/desc): + # one -> two -> threeI -> four + # -> fiveP + # -> [seven + eight] -> sevenD + # -> eightP + # -> nine -> tenI + # -> elevenL + dbFile = os.path.join(tempDir, 'data.db') + createTestDbTable( + dbFile, + 'CREATE TABLE nodes (name TEXT PRIMARY KEY, id TEXT UNIQUE, tips INT)', + 'INSERT INTO nodes VALUES (?, ?, ?)', + { + ('one', 'ott1', 6), + ('two', 'ott2', 2), + ('three', 'ott3', 1), + ('four', 'ott4', 1), + ('five', 'ott5', 1), + ('[seven + eight]', 'ott6', 2), + ('seven', 'ott7', 1), + ('eight', 'ott8', 1), + ('nine', 'ott9', 1), + ('ten', 'ott10', 1), + ('eleven', 'ott11', 1), + } + ) + createTestDbTable( + dbFile, + 'CREATE TABLE edges (parent TEXT, child TEXT, p_support INT, PRIMARY KEY (parent, child))', + 'INSERT INTO edges VALUES (?, ?, ?)', + { + ('one', 'two', 1), + ('two', 'three', 1), + ('three', 'four', 0), + ('two', 'five', 0), + ('one', '[seven + eight]', 1), + ('[seven + eight]', 'seven', 0), + ('[seven + eight]', 'eight', 1), + ('one', 'nine', 1), + ('nine', 'ten', 0), + ('one', 'eleven', 1), + } + ) + createTestDbTable( + dbFile, + 'CREATE TABLE names(name TEXT, alt_name TEXT, pref_alt INT, src TEXT, PRIMARY KEY(name, alt_name))', + 'INSERT INTO names VALUES (?, ?, ?, ?)', + { + ('eight', 'VIII', 1, 'eol'), + } + ) + createTestDbTable( + dbFile, + 'CREATE TABLE wiki_ids (name TEXT PRIMARY KEY, id INT)', + 'INSERT INTO wiki_ids VALUES (?, ?)', + { + ('seven', 10), + } + ) + createTestDbTable( + dbFile, + 'CREATE TABLE descs (wiki_id INT PRIMARY KEY, desc TEXT, from_dbp INT)', + 'INSERT INTO descs VALUES (?, ?, ?)', + { + (10, 'Seven prefers orange juice', 1), + } + ) + createTestDbTable( + dbFile, + 'CREATE TABLE node_imgs (name TEXT PRIMARY KEY, img_id INT, src TEXT)', + 'INSERT INTO node_imgs VALUES (?, ?, ?)', + { + ('three', 1, 'eol'), + ('ten', 10, 'enwiki'), + } + ) + createTestDbTable( + dbFile, + 'CREATE TABLE linked_imgs (name TEXT PRIMARY KEY, 
otol_ids TEXT)', + 'INSERT INTO linked_imgs VALUES (?, ?)', + { + ('eleven', 'ott3'), + } + ) + # Create temp picked-nodes file + pickedNodesFile = os.path.join(tempDir, 'picked_nodes.txt') + createTestFile(pickedNodesFile, ( + 'five\n' + 'VIII\n' + )) + # Run + genData(None, dbFile, pickedNodesFile) + # Check + self.assertEqual( + readTestDbTable(dbFile, 'SELECT name, id, tips from nodes_p'), + { + ('one', 'ott1', 3), + ('five', 'ott5', 1), + ('eight', 'ott8', 1), + ('eleven', 'ott11', 1), + } + ) + self.assertEqual( + readTestDbTable(dbFile, 'SELECT parent, child, p_support from edges_p'), + { + ('one', 'five', 0), + ('one', 'eight', 1), + ('one', 'eleven', 1), + } + ) + self.assertEqual( + readTestDbTable(dbFile, 'SELECT name, id, tips from nodes_i'), + { + ('one', 'ott1', 4), + ('two', 'ott2', 2), + ('three', 'ott3', 1), + ('five', 'ott5', 1), + ('eight', 'ott8', 1), + ('ten', 'ott10', 1), + } + ) + self.assertEqual( + readTestDbTable(dbFile, 'SELECT parent, child, p_support from edges_i'), + { + ('one', 'two', 1), + ('two', 'three', 1), + ('two', 'five', 0), + ('one', 'eight', 1), + ('one', 'ten', 0), + } + ) + self.assertEqual( + readTestDbTable(dbFile, 'SELECT name, id, tips from nodes_t'), + { + ('one', 'ott1', 5), + ('two', 'ott2', 2), + ('three', 'ott3', 1), + ('five', 'ott5', 1), + ('[seven + eight]', 'ott6', 2), + ('seven', 'ott7', 1), + ('eight', 'ott8', 1), + ('ten', 'ott10', 1), + } + ) + self.assertEqual( + readTestDbTable(dbFile, 'SELECT parent, child, p_support from edges_t'), + { + ('one', 'two', 1), + ('two', 'three', 1), + ('two', 'five', 0), + ('one', '[seven + eight]', 1), + ('[seven + eight]', 'seven', 0), + ('[seven + eight]', 'eight', 1), + ('one', 'ten', 0), + } + ) diff --git a/backend/tests/test_review_imgs_to_gen.py b/backend/tests/test_review_imgs_to_gen.py new file mode 100644 index 0000000..d88523b --- /dev/null +++ b/backend/tests/test_review_imgs_to_gen.py @@ -0,0 +1,84 @@ +import unittest +import tempfile, os, shutil + +from 
tests.common import readTestFile, createTestDbTable +from tol_data.review_imgs_to_gen import reviewImgs + +CLICK_IMG = os.path.join(os.path.dirname(__file__), 'green.png') +AVOID_IMG = os.path.join(os.path.dirname(__file__), 'red.png') + +class TestReviewImgs(unittest.TestCase): + def test_review(self): + with tempfile.TemporaryDirectory() as tempDir: + # Create temp eol imgs + eolImgDir = os.path.join(tempDir, 'eol_imgs') + os.mkdir(eolImgDir) + shutil.copy(CLICK_IMG, os.path.join(eolImgDir, '1 10.jpg')) + shutil.copy(AVOID_IMG, os.path.join(eolImgDir, '2 20.gif')) + shutil.copy(AVOID_IMG, os.path.join(eolImgDir, '4 40.jpg')) + # Create temp enwiki imgs + enwikiImgDir = os.path.join(tempDir, 'enwiki_imgs') + os.mkdir(enwikiImgDir) + shutil.copy(AVOID_IMG, os.path.join(enwikiImgDir, '1.jpg')) + shutil.copy(CLICK_IMG, os.path.join(enwikiImgDir, '3.png')) + shutil.copy(CLICK_IMG, os.path.join(enwikiImgDir, '4.png')) + # Create temp tree-of-life db + dbFile = os.path.join(tempDir, 'data.db') + createTestDbTable( + dbFile, + 'CREATE TABLE nodes (name TEXT PRIMARY KEY, id TEXT UNIQUE, tips INT)', + 'INSERT INTO nodes VALUES (?, ?, ?)', + { + ('one', 'ott1', 1), + ('two', 'ott2', 10), + ('three', 'ott3', 2), + } + ) + createTestDbTable( + dbFile, + 'CREATE TABLE names(name TEXT, alt_name TEXT, pref_alt INT, src TEXT, PRIMARY KEY(name, alt_name))', + 'INSERT OR IGNORE INTO names VALUES (?, ?, ?, ?)', + { + ('two', 'II', 1, 'eol'), + } + ) + createTestDbTable( + dbFile, + 'CREATE TABLE eol_ids (name TEXT PRIMARY KEY, id INT)', + 'INSERT INTO eol_ids VALUES (?, ?)', + { + ('one', 1), + ('two', 2), + ('four', 4), + } + ) + createTestDbTable( + dbFile, + 'CREATE TABLE wiki_ids (name TEXT PRIMARY KEY, id INT)', + 'INSERT INTO wiki_ids VALUES (?, ?)', + { + ('one', 1), + ('three', 3), + ('four', 4), + } + ) + # Run + outFile = os.path.join(tempDir, 'imgList.txt') + reviewImgs(eolImgDir, enwikiImgDir, dbFile, outFile, 'all') + # Check + 
self.assertEqual(set(readTestFile(outFile).splitlines()), { + 'ott1 ' + os.path.join(eolImgDir, '1 10.jpg'), + 'ott2', + 'ott3 ' + os.path.join(enwikiImgDir, '3.png'), + }) + # Add extra data + createTestDbTable(dbFile, None, 'INSERT INTO nodes VALUES (?, ?, ?)',{('four', 'ott4', 2)}) + # Run + reviewImgs(eolImgDir, enwikiImgDir, dbFile, outFile, 'all') + # Check + self.assertEqual(set(readTestFile(outFile).splitlines()), { + 'ott1 ' + os.path.join(eolImgDir, '1 10.jpg'), + 'ott2', + 'ott3 ' + os.path.join(enwikiImgDir, '3.png'), + 'ott4 ' + os.path.join(enwikiImgDir, '4.png'), + }) diff --git a/backend/tests/test_tilo.py b/backend/tests/test_tilo.py new file mode 100644 index 0000000..cfc719a --- /dev/null +++ b/backend/tests/test_tilo.py @@ -0,0 +1,160 @@ +import unittest +import tempfile, os + +from tests.common import createTestDbTable +from tilo import handleReq, TolNode, SearchSuggResponse, SearchSugg, InfoResponse, NodeInfo, DescInfo, ImgInfo + +def initTestDb(dbFile: str) -> None: + # Test tree (I/D means image/desc): + # oneI -> twoD -> threeD + # -> fourI + # -> fiveI -> sixID -> seven + createTestDbTable( + dbFile, + 'CREATE TABLE nodes_t (name TEXT PRIMARY KEY, id TEXT UNIQUE, tips INT)', + 'INSERT INTO nodes_t VALUES (?, ?, ?)', + { + ('one', 'ott1', 3), + ('two', 'ott2', 2), + ('three', 'ott3', 1), + ('four', 'ott4', 1), + ('five', 'ott5', 1), + ('six', 'ott6', 1), + ('seven', 'ott7', 1), + } + ) + createTestDbTable( + dbFile, + 'CREATE TABLE edges_t (parent TEXT, child TEXT, p_support INT, PRIMARY KEY (parent, child))', + 'INSERT INTO edges_t VALUES (?, ?, ?)', + { + ('one', 'two', 1), + ('two', 'three', 0), + ('two', 'four', 1), + ('one', 'five', 0), + ('five', 'six', 1), + ('six', 'seven', 1), + } + ) + createTestDbTable( + dbFile, + 'CREATE TABLE names(name TEXT, alt_name TEXT, pref_alt INT, src TEXT, PRIMARY KEY(name, alt_name))', + 'INSERT INTO names VALUES (?, ?, ?, ?)', + { + ('one', 'turtle', 1, 'eol'), + ('two', 'II', 1, 'eol'), + ('five', 
'V', 0, 'enwiki'), + ('six', 'VI', 1, 'enwiki'), + } + ) + createTestDbTable( + dbFile, + 'CREATE TABLE node_imgs (name TEXT PRIMARY KEY, img_id INT, src TEXT)', + 'INSERT INTO node_imgs VALUES (?, ?, ?)', + { + ('one', 1, 'eol'), + ('four', 10, 'enwiki'), + ('five', 10, 'enwiki'), + ('six', 1, 'picked'), + } + ) + createTestDbTable( + dbFile, + 'CREATE TABLE linked_imgs (name TEXT PRIMARY KEY, otol_ids TEXT)', + 'INSERT INTO linked_imgs VALUES (?, ?)', + { + ('two', 'ott4'), + } + ) + createTestDbTable( + dbFile, + 'CREATE TABLE images (' \ + 'id INT, src TEXT, url TEXT, license TEXT, artist TEXT, credit TEXT, PRIMARY KEY (id, src))', + 'INSERT INTO images VALUES (?, ?, ?, ?, ?, ?)', + { + (1, 'eol', 'url1', 'license1', 'artist1', 'credit1'), + (10, 'enwiki', 'url2', 'license2', 'artist2', 'credit2'), + (1, 'picked', 'url3', 'license3', 'artist3', 'credit3'), + } + ) + createTestDbTable( + dbFile, + 'CREATE TABLE node_iucn (name TEXT PRIMARY KEY, iucn TEXT)', + 'INSERT INTO node_iucn VALUES (?, ?)', + { + ('one', 'vulnerable'), + ('six', 'endangered'), + } + ) + createTestDbTable( + dbFile, + 'CREATE TABLE node_pop (name TEXT PRIMARY KEY, pop INT)', + 'INSERT INTO node_pop VALUES (?, ?)', + { + ('one', 10), + ('two', 20), + } + ) + createTestDbTable( + dbFile, + 'CREATE TABLE wiki_ids (name TEXT PRIMARY KEY, id INT)', + 'INSERT INTO wiki_ids VALUES (?, ?)', + { + ('two', 200), + ('three', 300), + ('six', 600), + } + ) + createTestDbTable( + dbFile, + 'CREATE TABLE descs (wiki_id INT PRIMARY KEY, desc TEXT, from_dbp INT)', + 'INSERT INTO descs VALUES (?, ?, ?)', + { + (200, 'two is 2', 1), + (300, 'three is 3', 0), + (600, 'six is 6', 1), + } + ) + +class TestHandleReq(unittest.TestCase): + def setUp(self): + self.maxDiff = None + self.tempDir = tempfile.TemporaryDirectory() + self.dbFile = os.path.join(self.tempDir.name, 'data.db') + initTestDb(self.dbFile) + def tearDown(self): + self.tempDir.cleanup() + def test_node_req(self): + response = 
handleReq(self.dbFile, {'QUERY_STRING': 'name=two&type=node&tree=trimmed'}) + self.assertEqual(response, { + 'two': TolNode('ott2', ['three', 'four'], 'one', 2, True, 'II', 'ott4.jpg', None), + 'three': TolNode('ott3', [], 'two', 1, False, None, None, None), + 'four': TolNode('ott4', [], 'two', 1, True, None, 'ott4.jpg', None), + }) + def test_node_toroot_req(self): + response = handleReq(self.dbFile, {'QUERY_STRING': 'name=seven&type=node&toroot=1&excl=five&tree=trimmed'}) + self.assertEqual(response, { + 'five': TolNode('ott5', ['six'], 'one', 1, 0, None, 'ott5.jpg', None), + 'six': TolNode('ott6', ['seven'], 'five', 1, 1, 'VI', 'ott6.jpg', 'endangered'), + 'seven': TolNode('ott7', [], 'six', 1, 1, None, None, None), + }) + def test_sugg_req(self): + response = handleReq(self.dbFile, {'QUERY_STRING': 'name=t&type=sugg&tree=trimmed'}) + self.assertEqual(response, SearchSuggResponse( + [ + SearchSugg('turtle', 'one', 10), + SearchSugg('two', None, 20), + SearchSugg('three', None, 0), + ], + False + )) + def test_info_req(self): + response = handleReq(self.dbFile, {'QUERY_STRING': 'name=six&type=info&tree=trimmed'}) + self.assertEqual(response, InfoResponse( + NodeInfo( + TolNode('ott6', ['seven'], 'five', 1, True, 'VI', 'ott6.jpg', 'endangered'), + DescInfo('six is 6', 600, True), + ImgInfo(1, 'picked', 'url3', 'license3', 'artist3', 'credit3'), + ), + [] + )) diff --git a/backend/tests/wikidata/__init__.py b/backend/tests/wikidata/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/backend/tests/wikidata/test_gen_taxon_src_data.py b/backend/tests/wikidata/test_gen_taxon_src_data.py new file mode 100644 index 0000000..1f886b3 --- /dev/null +++ b/backend/tests/wikidata/test_gen_taxon_src_data.py @@ -0,0 +1,109 @@ +import unittest +import tempfile, os, json, bz2, pickle, indexed_bzip2 + +from tests.common import readTestDbTable +from tol_data.wikidata.gen_taxon_src_data import genData + +def runGenData(wikiItemArray: str, preGenOffsets: bool, nProcs: 
int): + """ Sets up wikidata file to be read by genData(), runs it, reads the output database, and returns src+iucn info. + If 'preGenOffsets' is True, generates a bz2 offsets file before running genData(). """ + with tempfile.TemporaryDirectory() as tempDir: + # Create temp wikidata file + wikidataFile = os.path.join(tempDir, 'dump.json.bz2') + with bz2.open(wikidataFile, mode='wb') as file: + file.write(b'[\n') + for i in range(len(wikiItemArray)): + file.write(json.dumps(wikiItemArray[i], separators=(',',':')).encode()) + if i < len(wikiItemArray) - 1: + file.write(b',') + file.write(b'\n') + file.write(b']\n') + # Create temp offsets file if requested + offsetsFile = os.path.join(tempDir, 'offsets.dat') + if preGenOffsets: + with indexed_bzip2.open(wikidataFile) as file: + with open(offsetsFile, 'wb') as file2: + pickle.dump(file.block_offsets(), file2) + # Run genData() + dbFile = os.path.join(tempDir, 'data.db') + genData(wikidataFile, offsetsFile, dbFile, nProcs) + # Read db + srcRows = readTestDbTable(dbFile, 'SELECT src, id, title FROM src_id_to_title') + iucnRows = readTestDbTable(dbFile, 'SELECT title, status FROM title_iucn') + return srcRows, iucnRows + +class TestGenData(unittest.TestCase): + def setUp(self): + self.maxDiff = None # Remove output-diff size limit + self.testWikiItems = [ + { + 'id': 'Q1', + 'claims': { + 'P31': [{'mainsnak': {'datavalue': {'value': {'id': 'Q16521'}}}}], # instance-of 'taxon' + 'P830': [{'mainsnak': {'datavalue': {'value': 100}}}], # EOL ID 100 + 'P685': [{'mainsnak': {'datavalue': {'value': 200}}}], # NCBI ID 200 + 'P141': [{'mainsnak': {'datavalue': {'value': {'id': 'Q211005'}}}}], # IUCN 'least concern' + }, + 'sitelinks': {'enwiki': {'title': 'eucalyptus'}}, + }, + { + 'id': 'Q2', + 'claims': { + 'P685': [{'mainsnak': {'datavalue': {'value': 101}}}], # NCBI ID 101 + 'P31': [{'mainsnak': {'datavalue': {'value': {'id': 'Q23038290'}}}}], # fossil taxon + }, + 'sitelinks': {'enwiki': {'title': 'dolphin'}}, + }, + { + 
'id': 'Q30', + 'claims': { + 'P31': [{'mainsnak': {'datavalue': {'value': {'id': 'Q502895'}}}, # instance-of common name + 'qualifiers': {'P642': [{'datavalue': {'value': {'numeric-id': 100}}}]}}], # of Q100 + 'P685': [{'mainsnak': {'datavalue': {'value': 333}}}], # NCBI ID 333 + }, + 'sitelinks': {'enwiki': {'title': 'dog'}}, + }, + { + 'id': 'Q100', + 'claims': { + 'P31': [{'mainsnak': {'datavalue': {'value': {'id': 'Q16521'}}}}], # instance-of taxon + 'P5055': [{'mainsnak': {'datavalue': {'value': 9}}}], # IRMNG ID 9 + 'P141': [{'mainsnak': {'datavalue': {'value': {'id': 'Q11394'}}}}], # IUCN endangered + }, + }, + { + 'id': 'Q1', + 'claims': { + 'P31': [{'mainsnak': {'datavalue': {'value': {'id': 'Q16521'}}}}], # instance-of taxon + } + # No title + }, + {'id': 'Q932', 'claims': {}}, + ] + self.expectedSrcRows = { + ('eol', 100, 'eucalyptus'), + ('ncbi', 200, 'eucalyptus'), + ('ncbi', 101, 'dolphin'), + ('ncbi', 333, 'dog'), + ('irmng', 9, 'dog'), + } + self.expectedIucnRows = { + ('eucalyptus', 'least concern'), + ('dog', 'endangered'), + } + def test_wikiItems(self): + srcMap, iucnMap = runGenData(self.testWikiItems, False, 1) + self.assertEqual(srcMap, self.expectedSrcRows) + self.assertEqual(iucnMap, self.expectedIucnRows) + def test_empty_dump(self): + srcMap, iucnMap = runGenData([{}], False, 1) + self.assertEqual(srcMap, set()) + self.assertEqual(iucnMap, set()) + def test_multiprocessing(self): + srcMap, iucnMap = runGenData(self.testWikiItems, False, 4) + self.assertEqual(srcMap, self.expectedSrcRows) + self.assertEqual(iucnMap, self.expectedIucnRows) + def test_existing_offsets(self): + srcMap, iucnMap = runGenData(self.testWikiItems, True, 3) + self.assertEqual(srcMap, self.expectedSrcRows) + self.assertEqual(iucnMap, self.expectedIucnRows) diff --git a/backend/tilo.py b/backend/tilo.py index c1ecc34..dfefab1 100755 --- a/backend/tilo.py +++ b/backend/tilo.py @@ -28,7 +28,7 @@ if __name__ == '__main__': parser = 
argparse.ArgumentParser(description=HELP_INFO, formatter_class=argparse.RawDescriptionHelpFormatter) parser.parse_args() -DB_FILE = 'tolData/data.db' +DB_FILE = 'tol_data/data.db' DEFAULT_SUGG_LIM = 5 MAX_SUGG_LIM = 50 ROOT_NAME = 'cellular organisms' @@ -45,7 +45,7 @@ class TolNode: pSupport=False, commonName: str | None = None, imgName: None | str | tuple[str, str] | tuple[None, str] | tuple[str, None] = None, - iucn: str = None): + iucn: str | None = None): self.otolId = otolId self.children = children self.parent = parent @@ -54,23 +54,52 @@ class TolNode: self.commonName = commonName self.imgName = imgName self.iucn = iucn + # Used in unit testing + def __eq__(self, other): + return isinstance(other, TolNode) and \ + (self.otolId, set(self.children), self.parent, self.tips, \ + self.pSupport, self.commonName, self.imgName, self.iucn) == \ + (other.otolId, set(other.children), other.parent, other.tips, \ + other.pSupport, other.commonName, other.imgName, other.iucn) + def __repr__(self): + return str(self.__dict__) class SearchSugg: """ Represents a search suggestion """ def __init__(self, name: str, canonicalName: str | None = None, pop=0): self.name = name self.canonicalName = canonicalName self.pop = pop if pop is not None else 0 + # Used in unit testing + def __eq__(self, other): + return isinstance(other, SearchSugg) and \ + (self.name, self.canonicalName, self.pop) == (other.name, other.canonicalName, other.pop) + def __repr__(self): + return str(self.__dict__) + def __hash__(self): + return (self.name, self.canonicalName, self.pop).__hash__() class SearchSuggResponse: """ Sent as responses to 'sugg' requests """ def __init__(self, searchSuggs: list[SearchSugg], hasMore: bool): self.suggs = searchSuggs self.hasMore = hasMore + # Used in unit testing + def __eq__(self, other): + return isinstance(other, SearchSuggResponse) and \ + (set(self.suggs), self.hasMore) == (set(other.suggs), other.hasMore) + def __repr__(self): + return str(self.__dict__) class 
DescInfo: """ Represents a node's associated description """ def __init__(self, text: str, wikiId: int, fromDbp: bool): self.text = text self.wikiId = wikiId self.fromDbp = fromDbp + # Used in unit testing + def __eq__(self, other): + return isinstance(other, DescInfo) and \ + (self.text, self.wikiId, self.fromDbp) == (other.text, other.wikiId, other.fromDbp) + def __repr__(self): + return str(self.__dict__) class ImgInfo: """ Represents a node's associated image """ def __init__(self, id: int, src: str, url: str, license: str, artist: str, credit: str): @@ -80,17 +109,36 @@ class ImgInfo: self.license = license self.artist = artist self.credit = credit + # Used in unit testing + def __eq__(self, other): + return isinstance(other, ImgInfo) and \ + (self.id, self.src, self.url, self.license, self.artist, self.credit) == \ + (other.id, other.src, other.url, other.license, other.artist, other.credit) + def __repr__(self): + return str(self.__dict__) class NodeInfo: """ Represents info about a node """ def __init__(self, tolNode: TolNode, descInfo: DescInfo | None, imgInfo: ImgInfo | None): self.tolNode = tolNode self.descInfo = descInfo self.imgInfo = imgInfo + # Used in unit testing + def __eq__(self, other): + return isinstance(other, NodeInfo) and \ + (self.tolNode, self.descInfo, self.imgInfo) == (other.tolNode, other.descInfo, other.imgInfo) + def __repr__(self): + return str(self.__dict__) class InfoResponse: """ Sent as responses to 'info' requests """ def __init__(self, nodeInfo: NodeInfo, subNodesInfo: tuple[()] | tuple[NodeInfo | None, NodeInfo | None]): self.nodeInfo = nodeInfo self.subNodesInfo = subNodesInfo + # Used in unit testing + def __eq__(self, other): + return isinstance(other, InfoResponse) and \ + (self.nodeInfo, self.subNodesInfo) == (other.nodeInfo, other.subNodesInfo) + def __repr__(self): + return str(self.__dict__) # For data lookup def lookupNodes(names: list[str], tree: str, dbCur: sqlite3.Cursor) -> dict[str, TolNode]: @@ -123,8 +171,9 
@@ def lookupNodes(names: list[str], tree: str, dbCur: sqlite3.Cursor) -> dict[str, nameToNodes[childName].pSupport = pSupport == 1 # Get image names idsToNames = {nameToNodes[n].otolId: n for n in nameToNodes.keys()} - query = 'SELECT nodes.id from nodes INNER JOIN node_imgs ON nodes.name = node_imgs.name' \ - ' WHERE nodes.id IN ({})'.format(','.join(['?'] * len(idsToNames))) + query = f'SELECT {nodesTable}.id from {nodesTable}' \ + f' INNER JOIN node_imgs ON {nodesTable}.name = node_imgs.name' \ + f' WHERE {nodesTable}.id IN ' '({})'.format(','.join(['?'] * len(idsToNames))) for (otolId,) in dbCur.execute(query, list(idsToNames.keys())): nameToNodes[idsToNames[otolId]].imgName = otolId + '.jpg' # Get 'linked' images for unresolved names @@ -143,11 +192,13 @@ def lookupNodes(names: list[str], tree: str, dbCur: sqlite3.Cursor) -> dict[str, # Get preferred-name info query = f'SELECT name, alt_name FROM names WHERE pref_alt = 1 AND name IN ({queryParamStr})' for name, altName in dbCur.execute(query, names): - nameToNodes[name].commonName = altName + if name in nameToNodes: + nameToNodes[name].commonName = altName # Get IUCN status query = f'SELECT name, iucn FROM node_iucn WHERE name IN ({queryParamStr})' for name, iucn in dbCur.execute(query, names): - nameToNodes[name].iucn = iucn + if name in nameToNodes: + nameToNodes[name].iucn = iucn # return nameToNodes def lookupSuggs(searchStr: str, suggLimit: int, tree: str, dbCur: sqlite3.Cursor) -> SearchSuggResponse: @@ -157,7 +208,7 @@ def lookupSuggs(searchStr: str, suggLimit: int, tree: str, dbCur: sqlite3.Cursor nodesTable = f'nodes_{getTableSuffix(tree)}' nameQuery = f'SELECT {nodesTable}.name, node_pop.pop FROM {nodesTable}' \ f' LEFT JOIN node_pop ON {nodesTable}.name = node_pop.name' \ - f' WHERE node_pop.name LIKE ? AND node_pop.name NOT LIKE "[%"' \ + f' WHERE {nodesTable}.name LIKE ? 
AND {nodesTable}.name NOT LIKE "[%"' \ f' ORDER BY node_pop.pop DESC' altNameQuery = f'SELECT alt_name, names.name, pref_alt, node_pop.pop FROM' \ f' names INNER JOIN {nodesTable} ON names.name = {nodesTable}.name' \ @@ -204,6 +255,7 @@ def lookupSuggs(searchStr: str, suggLimit: int, tree: str, dbCur: sqlite3.Cursor return SearchSuggResponse(suggList[:suggLimit], hasMore) def lookupInfo(name: str, tree: str, dbCur: sqlite3.Cursor) -> InfoResponse | None: """ For a node name, returns a descriptive InfoResponse, or None """ + nodesTable = f'nodes_{getTableSuffix(tree)}' # Get node info nameToNodes = lookupNodes([name], tree, dbCur) tolNode = nameToNodes[name] if name in nameToNodes else None @@ -230,10 +282,10 @@ def lookupInfo(name: str, tree: str, dbCur: sqlite3.Cursor) -> InfoResponse | No idsToNames = {cast(str, nameToNodes[n].imgName)[:-4]: n for n in namesToLookup if nameToNodes[n].imgName is not None} idsToLookup = list(idsToNames.keys()) # Lookup using IDs avoids having to check linked_imgs - query = 'SELECT nodes.id, images.id, images.src, url, license, artist, credit FROM' \ - ' nodes INNER JOIN node_imgs ON nodes.name = node_imgs.name' \ - ' INNER JOIN images ON node_imgs.img_id = images.id AND node_imgs.src = images.src' \ - ' WHERE nodes.id IN ({})'.format(','.join(['?'] * len(idsToLookup))) + query = f'SELECT {nodesTable}.id, images.id, images.src, url, license, artist, credit FROM' \ + f' {nodesTable} INNER JOIN node_imgs ON {nodesTable}.name = node_imgs.name' \ + f' INNER JOIN images ON node_imgs.img_id = images.id AND node_imgs.src = images.src' \ + f' WHERE {nodesTable}.id IN ' '({})'.format(','.join(['?'] * len(idsToLookup))) for id, imgId, imgSrc, url, license, artist, credit in dbCur.execute(query, idsToLookup): nameToImgInfo[idsToNames[id]] = ImgInfo(imgId, imgSrc, url, license, artist, credit) # Construct response @@ -251,10 +303,11 @@ def getTableSuffix(tree: str) -> str: """ converts a reduced-tree descriptor into a sql-table-suffix """ 
return 't' if tree == 'trimmed' else 'i' if tree == 'images' else 'p' -def handleReq( - dbCur: sqlite3.Cursor, - environ: dict[str, str]) -> None | dict[str, TolNode] | SearchSuggResponse | InfoResponse: +def handleReq(dbFile: str, environ: dict[str, str]) -> None | dict[str, TolNode] | SearchSuggResponse | InfoResponse: """ Queries the database, and constructs a response object """ + # Open db + dbCon = sqlite3.connect(dbFile) + dbCur = dbCon.cursor() # Get query params queryStr = environ['QUERY_STRING'] if 'QUERY_STRING' in environ else '' queryDict = urllib.parse.parse_qs(queryStr) @@ -342,11 +395,8 @@ def handleReq( return None def application(environ: dict[str, str], start_response) -> Iterable[bytes]: """ Entry point for the WSGI script """ - # Open db - dbCon = sqlite3.connect(DB_FILE) - dbCur = dbCon.cursor() # Get response object - val = handleReq(dbCur, environ) + val = handleReq(DB_FILE, environ) # Construct response data = jsonpickle.encode(val, unpicklable=False).encode() headers = [('Content-type', 'application/json')] diff --git a/backend/tolData/README.md b/backend/tolData/README.md deleted file mode 100644 index 3b78af8..0000000 --- a/backend/tolData/README.md +++ /dev/null @@ -1,149 +0,0 @@ -This directory holds files used to generate the tree-of-life database data.db. - -# Database Tables -## Tree Structure -- `nodes`
- Format: `name TEXT PRIMARY KEY, id TEXT UNIQUE, tips INT`
- Represents a tree-of-life node. `tips` holds the number of no-child descendants -- `edges`
- Format: `parent TEXT, child TEXT, p_support INT, PRIMARY KEY (parent, child)`
- `p_support` is 1 if the edge has 'phylogenetic support', and 0 otherwise -## Node Mappings -- `eol_ids`
- Format: `name TEXT PRIMARY KEY, id INT`
- Associates nodes with EOL IDs -- `wiki_ids`
- Format: `name TEXT PRIMARY KEY, id INT`
- Associates nodes with wikipedia page IDs -## Node Vernacular Names -- `names`
- Format: `name TEXT, alt_name TEXT, pref_alt INT, src TEXT, PRIMARY KEY(name, alt_name)`
- Associates a node with alternative names. - `pref_alt` is 1 if the alt-name is the most 'preferred' one. - `src` indicates the dataset the alt-name was obtained from (can be 'eol', 'enwiki', or 'picked'). -## Node Descriptions -- `descs`
- Format: `wiki_id INT PRIMARY KEY, desc TEXT, from_dbp INT`
- Associates a wikipedia page ID with a short-description. - `from_dbp` is 1 if the description was obtained from DBpedia, and 0 otherwise. -## Node Images -- `node_imgs`
- Format: `name TEXT PRIMARY KEY, img_id INT, src TEXT`
- Associates a node with an image. -- `images`
- Format: `id INT, src TEXT, url TEXT, license TEXT, artist TEXT, credit TEXT, PRIMARY KEY (id, src)`
- Represents an image, identified by a source ('eol', 'enwiki', or 'picked'), and a source-specific ID. -- `linked_imgs`
- Format: `name TEXT PRIMARY KEY, otol_ids TEXT`
- Associates a node with an image from another node. - `otol_ids` can be an otol ID, or (for compound nodes) two comma-separated strings that may be otol IDs or empty. -## Reduced Trees -- `nodes_t`, `nodes_i`, `nodes_p`
- These are like `nodes`, but describe nodes of reduced trees. -- `edges_t`, `edges_i`, `edges_p`
- Like `edges` but for reduced trees. -## Other -- `node_iucn`
- Format: `name TEXT PRIMARY KEY, iucn TEXT`
- Associates nodes with IUCN conservation status strings (e.g. 'endangered')
- Format: `name TEXT PRIMARY KEY, pop INT`
- Associates nodes with popularity values (higher means more popular) - -# Generating the Database - -As a warning, the whole process takes a lot of time and file space. The -tree will probably have about 2.6 million nodes. Downloading the images -takes several days, and occupies over 200 GB. - -## Environment -Some of the scripts use third-party packages: -- `indexed_bzip2`: For parallelised bzip2 processing. -- `jsonpickle`: For encoding class objects as JSON. -- `requests`: For downloading data. -- `PIL`: For image processing. -- `tkinter`: For providing a basic GUI to review images. -- `mwxml`, `mwparserfromhell`: For parsing Wikipedia dumps. - -## Generate Tree Structure Data -1. Obtain 'tree data files' in otol/, as specified in it's README. -2. Run genOtolData.py, which creates data.db, and adds the `nodes` and `edges` tables, - using data in otol/. It also uses these files, if they exist: - - pickedOtolNames.txt: Has lines of the form `name1|otolId1`. - Can be used to override numeric suffixes added to same-name nodes. - -## Generate Dataset Mappings -1. Obtain 'taxonomy data files' in otol/, 'mapping files' in eol/, - files in wikidata/, and 'dump-index files' in enwiki/, as specified - in their READMEs. -2. Run genMappingData.py, which adds the `eol_ids` and `wiki_ids` tables, - as well as `node_iucn`. It uses the files obtained above, the `nodes` table, - and 'picked mappings' files, if they exist. - - pickedEolIds.txt contains lines like `3785967|405349`, specifying - an otol ID and an eol ID to map it to. The eol ID can be empty, - in which case the otol ID won't be mapped. - - pickedWikiIds.txt and pickedWikiIdsRough.txt contain lines like - `5341349|Human`, specifying an otol ID and an enwiki title, - which may contain spaces. The title can be empty. - -## Generate Node Name Data -1. Obtain 'name data files' in eol/, and 'description database files' in enwiki/, - as specified in their READMEs. -2. 
Run genNameData.py, which adds the `names` table, using data in eol/ and enwiki/, - along with the `nodes`, `eol_ids`, and `wiki_ids` tables.
- It also uses pickedNames.txt, if it exists. This file can hold lines like - `embryophyta|land plant|1`, specifying a node name, an alt-name to add for it, - and a 1 or 0 indicating whether it is a 'preferred' alt-name. The last field - can be empty, which indicates that the alt-name should be removed, or, if the - alt-name is the same as the node name, that no alt-name should be preferred. - -## Generate Node Description Data -1. Obtain files in dbpedia/, as specified in it's README. -2. Run genDescData.py, which adds the `descs` table, using data in dbpedia/ and - enwiki/, and the `nodes` table. - -## Generate Node Images Data -### Get images from EOL -1. Obtain 'image metadata files' in eol/, as specified in it's README. -2. In eol/, run downloadImgs.py, which downloads images (possibly multiple per node), - into eol/imgsForReview, using data in eol/, as well as the `eol_ids` table. -3. In eol/, run reviewImgs.py, which interactively displays the downloaded images for - each node, providing the choice of which to use, moving them to eol/imgs/. - Uses `names` and `eol_ids` to display extra info. -### Get Images from Wikipedia -1. In enwiki/, run genImgData.py, which looks for wikipedia image names for each node, - using the `wiki_ids` table, and stores them in a database. -2. In enwiki/, run downloadImgLicenseInfo.py, which downloads licensing information for - those images, using wikipedia's online API. -3. In enwiki/, run downloadImgs.py, which downloads 'permissively-licensed' - images into enwiki/imgs/. -### Merge the Image Sets -1. Run reviewImgsToGen.py, which displays images from eol/imgs/ and enwiki/imgs/, - and enables choosing, for each node, which image should be used, if any, - and outputs choice information into imgList.txt. Uses the `nodes`, - `eol_ids`, and `wiki_ids` tables (as well as `names` to display extra info). -2. 
Run genImgs.py, which creates cropped/resized images in img/, from files listed in - imgList.txt and located in eol/ and enwiki/, and creates the `node_imgs` and - `images` tables. If pickedImgs/ is present, images within it are also used.
- The outputs might need to be manually created/adjusted: - - An input image might have no output produced, possibly due to - data incompatibilities, memory limits, etc. A few input image files - might actually be html files, containing a 'file not found' page. - - An input x.gif might produce x-1.jpg, x-2.jpg, etc, instead of x.jpg. - - An input image might produce output with unexpected dimensions. - This seems to happen when the image is very large, and triggers a - decompression bomb warning. -### Add more Image Associations -1. Run genLinkedImgs.py, which tries to associate nodes without images to - images of it's children. Adds the `linked_imgs` table, and uses the - `nodes`, `edges`, and `node_imgs` tables. - -## Generate Reduced Trees -1. Run genReducedTrees.py, which generates multiple reduced versions of the tree, - adding the `nodes_*` and `edges_*` tables, using `nodes` and `names`. Reads from - pickedNodes.txt, which lists names of nodes that must be included (1 per line). - -## Generate Node Popularity Data -1. Obtain 'page view files' in enwiki/Run genPopData.py, as specified in it's README. -2. Run genPopData.py, which adds the `node_pop` table, using data in enwiki/, - and the `wiki_ids` table. diff --git a/backend/tolData/dbpedia/README.md b/backend/tolData/dbpedia/README.md deleted file mode 100644 index dd9bda7..0000000 --- a/backend/tolData/dbpedia/README.md +++ /dev/null @@ -1,29 +0,0 @@ -This directory holds files obtained/derived from [Dbpedia](https://www.dbpedia.org). - -# Downloaded Files -- `labels_lang=en.ttl.bz2`
- Obtained via https://databus.dbpedia.org/dbpedia/collections/latest-core. - Downloaded from . -- `page_lang=en_ids.ttl.bz2`
- Downloaded from -- `redirects_lang=en_transitive.ttl.bz2`
- Downloaded from . -- `disambiguations_lang=en.ttl.bz2`
- Downloaded from . -- `instance-types_lang=en_specific.ttl.bz2`
- Downloaded from . -- `short-abstracts_lang=en.ttl.bz2`
- Downloaded from . - -# Other Files -- genDescData.py
- Used to generate a database representing data from the ttl files. -- descData.db
- Generated by genDescData.py.
- Tables:
- - `labels`: `iri TEXT PRIMARY KEY, label TEXT ` - - `ids`: `iri TEXT PRIMARY KEY, id INT` - - `redirects`: `iri TEXT PRIMARY KEY, target TEXT` - - `disambiguations`: `iri TEXT PRIMARY KEY` - - `types`: `iri TEXT, type TEXT` - - `abstracts`: `iri TEXT PRIMARY KEY, abstract TEXT` diff --git a/backend/tolData/dbpedia/genDescData.py b/backend/tolData/dbpedia/genDescData.py deleted file mode 100755 index 43ed815..0000000 --- a/backend/tolData/dbpedia/genDescData.py +++ /dev/null @@ -1,128 +0,0 @@ -#!/usr/bin/python3 - -import re -import bz2, sqlite3 - -import argparse -parser = argparse.ArgumentParser(description=""" -Adds DBpedia labels/types/abstracts/etc data into a database -""", formatter_class=argparse.RawDescriptionHelpFormatter) -parser.parse_args() - -labelsFile = 'labels_lang=en.ttl.bz2' # Had about 16e6 entries -idsFile = 'page_lang=en_ids.ttl.bz2' -redirectsFile = 'redirects_lang=en_transitive.ttl.bz2' -disambigFile = 'disambiguations_lang=en.ttl.bz2' -typesFile = 'instance-types_lang=en_specific.ttl.bz2' -abstractsFile = 'short-abstracts_lang=en.ttl.bz2' -dbFile = 'descData.db' -# In testing, this script took a few hours to run, and generated about 10GB - -print('Creating database') -dbCon = sqlite3.connect(dbFile) -dbCur = dbCon.cursor() - -print('Reading/storing label data') -dbCur.execute('CREATE TABLE labels (iri TEXT PRIMARY KEY, label TEXT)') -dbCur.execute('CREATE INDEX labels_idx ON labels(label)') -dbCur.execute('CREATE INDEX labels_idx_nc ON labels(label COLLATE NOCASE)') -labelLineRegex = re.compile(r'<([^>]+)> <[^>]+> "((?:[^"]|\\")+)"@en \.\n') -lineNum = 0 -with bz2.open(labelsFile, mode='rt') as file: - for line in file: - lineNum += 1 - if lineNum % 1e5 == 0: - print(f'At line {lineNum}') - # - match = labelLineRegex.fullmatch(line) - if match is None: - raise Exception(f'ERROR: Line {lineNum} has unexpected format') - dbCur.execute('INSERT INTO labels VALUES (?, ?)', (match.group(1), match.group(2))) - -print('Reading/storing wiki page 
ids') -dbCur.execute('CREATE TABLE ids (iri TEXT PRIMARY KEY, id INT)') -dbCur.execute('CREATE INDEX ids_idx ON ids(id)') -idLineRegex = re.compile(r'<([^>]+)> <[^>]+> "(\d+)".*\n') -lineNum = 0 -with bz2.open(idsFile, mode='rt') as file: - for line in file: - lineNum += 1 - if lineNum % 1e5 == 0: - print(f'At line {lineNum}') - # - match = idLineRegex.fullmatch(line) - if match is None: - raise Exception(f'ERROR: Line {lineNum} has unexpected format') - try: - dbCur.execute('INSERT INTO ids VALUES (?, ?)', (match.group(1), int(match.group(2)))) - except sqlite3.IntegrityError as e: - # Accounts for certain lines that have the same IRI - print(f'WARNING: Failed to add entry with IRI "{match.group(1)}": {e}') - -print('Reading/storing redirection data') -dbCur.execute('CREATE TABLE redirects (iri TEXT PRIMARY KEY, target TEXT)') -redirLineRegex = re.compile(r'<([^>]+)> <[^>]+> <([^>]+)> \.\n') -lineNum = 0 -with bz2.open(redirectsFile, mode='rt') as file: - for line in file: - lineNum += 1 - if lineNum % 1e5 == 0: - print(f'At line {lineNum}') - # - match = redirLineRegex.fullmatch(line) - if match is None: - raise Exception(f'ERROR: Line {lineNum} has unexpected format') - dbCur.execute('INSERT INTO redirects VALUES (?, ?)', (match.group(1), match.group(2))) - -print('Reading/storing diambiguation-page data') -dbCur.execute('CREATE TABLE disambiguations (iri TEXT PRIMARY KEY)') -disambigLineRegex = redirLineRegex -lineNum = 0 -with bz2.open(disambigFile, mode='rt') as file: - for line in file: - lineNum += 1 - if lineNum % 1e5 == 0: - print(f'At line {lineNum}') - # - match = disambigLineRegex.fullmatch(line) - if match is None: - raise Exception(f'ERROR: Line {lineNum} has unexpected format') - dbCur.execute('INSERT OR IGNORE INTO disambiguations VALUES (?)', (match.group(1),)) - -print('Reading/storing instance-type data') -dbCur.execute('CREATE TABLE types (iri TEXT, type TEXT)') -dbCur.execute('CREATE INDEX types_iri_idx ON types(iri)') -typeLineRegex = 
redirLineRegex -lineNum = 0 -with bz2.open(typesFile, mode='rt') as file: - for line in file: - lineNum += 1 - if lineNum % 1e5 == 0: - print(f'At line {lineNum}') - # - match = typeLineRegex.fullmatch(line) - if match is None: - raise Exception(f'ERROR: Line {lineNum} has unexpected format') - dbCur.execute('INSERT INTO types VALUES (?, ?)', (match.group(1), match.group(2))) - -print('Reading/storing abstracts') -dbCur.execute('CREATE TABLE abstracts (iri TEXT PRIMARY KEY, abstract TEXT)') -descLineRegex = labelLineRegex -lineNum = 0 -with bz2.open(abstractsFile, mode='rt') as file: - for line in file: - lineNum += 1 - if lineNum % 1e5 == 0: - print(f'At line {lineNum}') - # - if line[0] == '#': - continue - match = descLineRegex.fullmatch(line) - if match is None: - raise Exception(f'ERROR: Line {lineNum} has unexpected format') - dbCur.execute('INSERT INTO abstracts VALUES (?, ?)', - (match.group(1), match.group(2).replace(r'\"', '"'))) - -print('Closing database') -dbCon.commit() -dbCon.close() diff --git a/backend/tolData/enwiki/README.md b/backend/tolData/enwiki/README.md deleted file mode 100644 index 76f9ee5..0000000 --- a/backend/tolData/enwiki/README.md +++ /dev/null @@ -1,63 +0,0 @@ -This directory holds files obtained/derived from [English Wikipedia](https://en.wikipedia.org/wiki/Main_Page). - -# Downloaded Files -- enwiki-20220501-pages-articles-multistream.xml.bz2
- Contains text content and metadata for pages in enwiki. - Obtained via (site suggests downloading from a mirror). - Some file content and format information was available from - . -- enwiki-20220501-pages-articles-multistream-index.txt.bz2
- Obtained like above. Holds lines of the form offset1:pageId1:title1, - providing, for each page, an offset into the dump file of a chunk of - 100 pages that includes it. - -# Dump-Index Files -- genDumpIndexDb.py
- Creates a database version of the enwiki-dump index file. -- dumpIndex.db
- Generated by genDumpIndexDb.py.
- Tables:
- - `offsets`: `title TEXT PRIMARY KEY, id INT UNIQUE, offset INT, next_offset INT` - -# Description Database Files -- genDescData.py
- Reads through pages in the dump file, and adds short-description info to a database. -- descData.db
- Generated by genDescData.py.
- Tables:
- - `pages`: `id INT PRIMARY KEY, title TEXT UNIQUE` - - `redirects`: `id INT PRIMARY KEY, target TEXT` - - `descs`: `id INT PRIMARY KEY, desc TEXT` - -# Image Database Files -- genImgData.py
- Used to find infobox image names for page IDs, storing them into a database. -- downloadImgLicenseInfo.py
- Used to download licensing metadata for image names, via wikipedia's online API, storing them into a database. -- imgData.db
- Used to hold metadata about infobox images for a set of pageIDs. - Generated using genImgData.py and downloadImgLicenseInfo.py.
- Tables:
- - `page_imgs`: `page_id INT PRIMARY KEY, img_name TEXT`
- `img_name` may be null, which means 'none found', and is used to avoid re-processing page-ids. - - `imgs`: `name TEXT PRIMARY KEY, license TEXT, artist TEXT, credit TEXT, restrictions TEXT, url TEXT`
- Might lack some matches for `img_name` in `page_imgs`, due to licensing info unavailability. -- downloadImgs.py
- Used to download image files into imgs/. - -# Page View Files -- pageviews/pageviews-*-user.bz2 - Each holds wikimedia article page view data for some month. - Obtained via . - Some format info was available from . -- genPageviewData.py
- Reads pageviews/*, and creates a database holding average monthly pageview counts. - pageviewData.db
- Generated using genPageviewData.py.
- Tables:
- - `views`: `title TEXT PRIMARY KEY, id INT, views INT` - -# Other Files -- lookupPage.py
- Running `lookupPage.py title1` looks in the dump for a page with a given title, - and prints the contents to stdout. Uses dumpIndex.db. diff --git a/backend/tolData/enwiki/downloadImgLicenseInfo.py b/backend/tolData/enwiki/downloadImgLicenseInfo.py deleted file mode 100755 index ba6317e..0000000 --- a/backend/tolData/enwiki/downloadImgLicenseInfo.py +++ /dev/null @@ -1,147 +0,0 @@ -#!/usr/bin/python3 - -import re -import sqlite3, urllib.parse, html -import requests -import time, signal - -import argparse -parser = argparse.ArgumentParser(description=""" -Reads image names from a database, and uses enwiki's online API to obtain -licensing information for them, adding the info to the database. - -SIGINT causes the program to finish an ongoing download and exit. -The program can be re-run to continue downloading, and looks -at already-processed names to decide what to skip. -""", formatter_class=argparse.RawDescriptionHelpFormatter) -parser.parse_args() - -imgDb = 'imgData.db' -apiUrl = 'https://en.wikipedia.org/w/api.php' -userAgent = 'terryt.dev (terry06890@gmail.com)' -batchSz = 50 # Max 50 -tagRegex = re.compile(r'<[^<]+>') -whitespaceRegex = re.compile(r'\s+') - -print('Opening database') -dbCon = sqlite3.connect(imgDb) -dbCur = dbCon.cursor() -dbCur2 = dbCon.cursor() -print('Checking for table') -if dbCur.execute('SELECT name FROM sqlite_master WHERE type="table" AND name="imgs"').fetchone() is None: - dbCur.execute('CREATE TABLE imgs(' \ - 'name TEXT PRIMARY KEY, license TEXT, artist TEXT, credit TEXT, restrictions TEXT, url TEXT)') - -print('Reading image names') -imgNames: set[str] = set() -for (imgName,) in dbCur.execute('SELECT DISTINCT img_name FROM page_imgs WHERE img_name NOT NULL'): - imgNames.add(imgName) -print(f'Found {len(imgNames)}') - -print('Checking for already-processed images') -oldSz = len(imgNames) -for (imgName,) in dbCur.execute('SELECT name FROM imgs'): - imgNames.discard(imgName) -print(f'Found {oldSz - len(imgNames)}') - -# Set SIGINT 
handler -interrupted = False -oldHandler = None -def onSigint(sig, frame): - global interrupted - interrupted = True - signal.signal(signal.SIGINT, oldHandler) -oldHandler = signal.signal(signal.SIGINT, onSigint) - -print('Iterating through image names') -imgNameList = list(imgNames) -iterNum = 0 -for i in range(0, len(imgNameList), batchSz): - iterNum += 1 - if iterNum % 1 == 0: - print(f'At iteration {iterNum} (after {(iterNum - 1) * batchSz} images)') - if interrupted: - print(f'Exiting loop at iteration {iterNum}') - break - # Get batch - imgBatch = imgNameList[i:i+batchSz] - imgBatch = ['File:' + x for x in imgBatch] - # Make request - headers = { - 'user-agent': userAgent, - 'accept-encoding': 'gzip', - } - params = { - 'action': 'query', - 'format': 'json', - 'prop': 'imageinfo', - 'iiprop': 'extmetadata|url', - 'maxlag': '5', - 'titles': '|'.join(imgBatch), - 'iiextmetadatafilter': 'Artist|Credit|LicenseShortName|Restrictions', - } - responseObj = None - try: - response = requests.get(apiUrl, params=params, headers=headers) - responseObj = response.json() - except Exception as e: - print(f'ERROR: Exception while downloading info: {e}') - print('\tImage batch: ' + '|'.join(imgBatch)) - continue - # Parse response-object - if 'query' not in responseObj or 'pages' not in responseObj['query']: - print('WARNING: Response object for doesn\'t have page data') - print('\tImage batch: ' + '|'.join(imgBatch)) - if 'error' in responseObj: - errorCode = responseObj['error']['code'] - print(f'\tError code: {errorCode}') - if errorCode == 'maxlag': - time.sleep(5) - continue - pages = responseObj['query']['pages'] - normalisedToInput: dict[str, str] = {} - if 'normalized' in responseObj['query']: - for entry in responseObj['query']['normalized']: - normalisedToInput[entry['to']] = entry['from'] - for _, page in pages.items(): - # Some fields // More info at https://www.mediawiki.org/wiki/Extension:CommonsMetadata#Returned_data - # LicenseShortName: short human-readable 
license name, apparently more reliable than 'License', - # Artist: author name (might contain complex html, multiple authors, etc) - # Credit: 'source' - # For image-map-like images, can be quite large/complex html, creditng each sub-image - # May be text2, where the text2 might be non-indicative - # Restrictions: specifies non-copyright legal restrictions - title: str = page['title'] - if title in normalisedToInput: - title = normalisedToInput[title] - title = title[5:] # Remove 'File:' - if title not in imgNames: - print(f'WARNING: Got title "{title}" not in image-name list') - continue - if 'imageinfo' not in page: - print(f'WARNING: No imageinfo section for page "{title}"') - continue - metadata = page['imageinfo'][0]['extmetadata'] - url: str = page['imageinfo'][0]['url'] - license: str | None = metadata['LicenseShortName']['value'] if 'LicenseShortName' in metadata else None - artist: str | None = metadata['Artist']['value'] if 'Artist' in metadata else None - credit: str | None = metadata['Credit']['value'] if 'Credit' in metadata else None - restrictions: str | None = metadata['Restrictions']['value'] if 'Restrictions' in metadata else None - # Remove markup - if artist is not None: - artist = tagRegex.sub(' ', artist) - artist = whitespaceRegex.sub(' ', artist) - artist = html.unescape(artist) - artist = urllib.parse.unquote(artist) - if credit is not None: - credit = tagRegex.sub(' ', credit) - credit = whitespaceRegex.sub(' ', credit) - credit = html.unescape(credit) - credit = urllib.parse.unquote(credit) - # Add to db - dbCur2.execute('INSERT INTO imgs VALUES (?, ?, ?, ?, ?, ?)', - (title, license, artist, credit, restrictions, url)) - -print('Closing database') -dbCon.commit() -dbCon.close() diff --git a/backend/tolData/enwiki/downloadImgs.py b/backend/tolData/enwiki/downloadImgs.py deleted file mode 100755 index def4714..0000000 --- a/backend/tolData/enwiki/downloadImgs.py +++ /dev/null @@ -1,88 +0,0 @@ -#!/usr/bin/python3 - -import sys, re, os 
-import sqlite3 -import urllib.parse, requests -import time, signal - -import argparse -parser = argparse.ArgumentParser(description=""" -Downloads images from URLs in an image database, into an output directory, -with names of the form 'pageId1.ext1'. - -SIGINT causes the program to finish an ongoing download and exit. -The program can be re-run to continue downloading, and looks -in the output directory do decide what to skip. -""", formatter_class=argparse.RawDescriptionHelpFormatter) -parser.parse_args() - -imgDb = 'imgData.db' # About 130k image names -outDir = 'imgs' -licenseRegex = re.compile(r'cc0|cc([ -]by)?([ -]sa)?([ -][1234]\.[05])?( \w\w\w?)?', flags=re.IGNORECASE) -# In testing, this downloaded about 100k images, over several days - -if not os.path.exists(outDir): - os.mkdir(outDir) -print('Checking for already-downloaded images') -fileList = os.listdir(outDir) -pageIdsDone: set[int] = set() -for filename in fileList: - basename, extension = os.path.splitext(filename) - pageIdsDone.add(int(basename)) -print(f'Found {len(pageIdsDone)}') - -# Set SIGINT handler -interrupted = False -oldHandler = None -def onSigint(sig, frame): - global interrupted - interrupted = True - signal.signal(signal.SIGINT, oldHandler) -oldHandler = signal.signal(signal.SIGINT, onSigint) - -print('Opening database') -dbCon = sqlite3.connect(imgDb) -dbCur = dbCon.cursor() -print('Starting downloads') -iterNum = 0 -query = 'SELECT page_id, license, artist, credit, restrictions, url FROM' \ - ' imgs INNER JOIN page_imgs ON imgs.name = page_imgs.img_name' -for pageId, license, artist, credit, restrictions, url in dbCur.execute(query): - if pageId in pageIdsDone: - continue - if interrupted: - print('Exiting loop') - break - # Check for problematic attributes - if license is None or licenseRegex.fullmatch(license) is None: - continue - if artist is None or artist == '' or len(artist) > 100 or re.match(r'(\d\. 
)?File:', artist) is not None: - continue - if credit is None or len(credit) > 300 or re.match(r'File:', credit) is not None: - continue - if restrictions is not None and restrictions != '': - continue - # Download image - iterNum += 1 - print(f'Iteration {iterNum}: Downloading for page-id {pageId}') - urlParts = urllib.parse.urlparse(url) - extension = os.path.splitext(urlParts.path)[1] - if len(extension) <= 1: - print(f'WARNING: No filename extension found in URL {url}') - sys.exit(1) - outFile = f'{outDir}/{pageId}{extension}' - headers = { - 'user-agent': 'terryt.dev (terry06890@gmail.com)', - 'accept-encoding': 'gzip', - } - try: - response = requests.get(url, headers=headers) - with open(outFile, 'wb') as file: - file.write(response.content) - time.sleep(1) - # https://en.wikipedia.org/wiki/Wikipedia:Database_download says to 'throttle self to 1 cache miss per sec' - # It's unclear how to properly check for cache misses, so this just aims for 1 per sec - except Exception as e: - print(f'Error while downloading to {outFile}: {e}') -print('Closing database') -dbCon.close() diff --git a/backend/tolData/enwiki/genDescData.py b/backend/tolData/enwiki/genDescData.py deleted file mode 100755 index 1698f5c..0000000 --- a/backend/tolData/enwiki/genDescData.py +++ /dev/null @@ -1,124 +0,0 @@ -#!/usr/bin/python3 - -import sys, os, re -import bz2 -import html, mwxml, mwparserfromhell -import sqlite3 - -import argparse -parser = argparse.ArgumentParser(description=""" -Reads through the wiki dump, and attempts to parse short-descriptions, -and add them to a database -""", formatter_class=argparse.RawDescriptionHelpFormatter) -parser.parse_args() - -dumpFile = 'enwiki-20220501-pages-articles-multistream.xml.bz2' # Had about 22e6 pages -enwikiDb = 'descData.db' -# In testing, this script took over 10 hours to run, and generated about 5GB - -descLineRegex = re.compile('^ *[A-Z\'"]') -embeddedHtmlRegex = re.compile(r'<[^<]+/>||<[^([^<]*|[^<]*<[^<]+>[^<]*)|<[^<]+$') - # 
Recognises a self-closing HTML tag, a tag with 0 children, tag with 1 child with 0 children, or unclosed tag -convertTemplateRegex = re.compile(r'{{convert\|(\d[^|]*)\|(?:(to|-)\|(\d[^|]*)\|)?([a-z][^|}]*)[^}]*}}') -def convertTemplateReplace(match): - if match.group(2) is None: - return f'{match.group(1)} {match.group(4)}' - else: - return f'{match.group(1)} {match.group(2)} {match.group(3)} {match.group(4)}' -parensGroupRegex = re.compile(r' \([^()]*\)') -leftoverBraceRegex = re.compile(r'(?:{\||{{).*') - -def parseDesc(text: str) -> str | None: - # Find first matching line outside {{...}}, [[...]], and block-html-comment constructs, - # and then accumulate lines until a blank one. - # Some cases not accounted for include: disambiguation pages, abstracts with sentences split-across-lines, - # nested embedded html, 'content significant' embedded-html, markup not removable with mwparsefromhell, - lines: list[str] = [] - openBraceCount = 0 - openBracketCount = 0 - inComment = False - skip = False - for line in text.splitlines(): - line = line.strip() - if not lines: - if line: - if openBraceCount > 0 or line[0] == '{': - openBraceCount += line.count('{') - openBraceCount -= line.count('}') - skip = True - if openBracketCount > 0 or line[0] == '[': - openBracketCount += line.count('[') - openBracketCount -= line.count(']') - skip = True - if inComment or line.find('') != -1: - if inComment: - inComment = False - skip = True - else: - inComment = True - skip = True - if skip: - skip = False - continue - if line[-1] == ':': # Seems to help avoid disambiguation pages - return None - if descLineRegex.match(line) is not None: - lines.append(line) - else: - if not line: - return removeMarkup(' '.join(lines)) - lines.append(line) - if lines: - return removeMarkup(' '.join(lines)) - return None -def removeMarkup(content: str) -> str: - content = embeddedHtmlRegex.sub('', content) - content = convertTemplateRegex.sub(convertTemplateReplace, content) - content = 
mwparserfromhell.parse(content).strip_code() # Remove wikitext markup - content = parensGroupRegex.sub('', content) - content = leftoverBraceRegex.sub('', content) - return content -def convertTitle(title: str) -> str: - return html.unescape(title).replace('_', ' ') - -print('Creating database') -if os.path.exists(enwikiDb): - raise Exception(f'ERROR: Existing {enwikiDb}') -dbCon = sqlite3.connect(enwikiDb) -dbCur = dbCon.cursor() -dbCur.execute('CREATE TABLE pages (id INT PRIMARY KEY, title TEXT UNIQUE)') -dbCur.execute('CREATE INDEX pages_title_idx ON pages(title COLLATE NOCASE)') -dbCur.execute('CREATE TABLE redirects (id INT PRIMARY KEY, target TEXT)') -dbCur.execute('CREATE INDEX redirects_idx ON redirects(target)') -dbCur.execute('CREATE TABLE descs (id INT PRIMARY KEY, desc TEXT)') - -print('Iterating through dump file') -with bz2.open(dumpFile, mode='rt') as file: - dump = mwxml.Dump.from_file(file) - pageNum = 0 - for page in dump: - pageNum += 1 - if pageNum % 1e4 == 0: - print(f'At page {pageNum}') - if pageNum > 3e4: - break - # Parse page - if page.namespace == 0: - try: - dbCur.execute('INSERT INTO pages VALUES (?, ?)', (page.id, convertTitle(page.title))) - except sqlite3.IntegrityError as e: - # Accounts for certain pages that have the same title - print(f'Failed to add page with title "{page.title}": {e}', file=sys.stderr) - continue - if page.redirect is not None: - dbCur.execute('INSERT INTO redirects VALUES (?, ?)', (page.id, convertTitle(page.redirect))) - else: - revision = next(page) - desc = parseDesc(revision.text) - if desc is not None: - dbCur.execute('INSERT INTO descs VALUES (?, ?)', (page.id, desc)) - -print('Closing database') -dbCon.commit() -dbCon.close() diff --git a/backend/tolData/enwiki/genDumpIndexDb.py b/backend/tolData/enwiki/genDumpIndexDb.py deleted file mode 100755 index 3bd129f..0000000 --- a/backend/tolData/enwiki/genDumpIndexDb.py +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/python3 - -import sys, os, re -import bz2 
-import sqlite3 - -import argparse -parser = argparse.ArgumentParser(description=""" -Adds data from the wiki dump index-file into a database -""", formatter_class=argparse.RawDescriptionHelpFormatter) -parser.parse_args() - -indexFile = 'enwiki-20220501-pages-articles-multistream-index.txt.bz2' # Had about 22e6 lines -indexDb = 'dumpIndex.db' - -if os.path.exists(indexDb): - raise Exception(f'ERROR: Existing {indexDb}') -print('Creating database') -dbCon = sqlite3.connect(indexDb) -dbCur = dbCon.cursor() -dbCur.execute('CREATE TABLE offsets (title TEXT PRIMARY KEY, id INT UNIQUE, offset INT, next_offset INT)') - -print('Iterating through index file') -lineRegex = re.compile(r'([^:]+):([^:]+):(.*)') -lastOffset = 0 -lineNum = 0 -entriesToAdd: list[tuple[str, str]] = [] -with bz2.open(indexFile, mode='rt') as file: - for line in file: - lineNum += 1 - if lineNum % 1e5 == 0: - print(f'At line {lineNum}') - # - match = lineRegex.fullmatch(line.rstrip()) - assert match is not None - offsetStr, pageId, title = match.group(1,2,3) - offset = int(offsetStr) - if offset > lastOffset: - for t, p in entriesToAdd: - try: - dbCur.execute('INSERT INTO offsets VALUES (?, ?, ?, ?)', (t, int(p), lastOffset, offset)) - except sqlite3.IntegrityError as e: - # Accounts for certain entries in the file that have the same title - print(f'Failed on title "{t}": {e}', file=sys.stderr) - entriesToAdd = [] - lastOffset = offset - entriesToAdd.append((title, pageId)) -for title, pageId in entriesToAdd: - try: - dbCur.execute('INSERT INTO offsets VALUES (?, ?, ?, ?)', (title, int(pageId), lastOffset, -1)) - except sqlite3.IntegrityError as e: - print(f'Failed on title "{t}": {e}', file=sys.stderr) - -print('Closing database') -dbCon.commit() -dbCon.close() diff --git a/backend/tolData/enwiki/genImgData.py b/backend/tolData/enwiki/genImgData.py deleted file mode 100755 index 00140f6..0000000 --- a/backend/tolData/enwiki/genImgData.py +++ /dev/null @@ -1,186 +0,0 @@ -#!/usr/bin/python3 - -import 
re -import bz2, html, urllib.parse -import sqlite3 - -import argparse -parser = argparse.ArgumentParser(description=""" -For some set of page IDs, looks up their content in the wiki dump, -and tries to parse infobox image names, storing them into a database. - -The program can be re-run with an updated set of page IDs, and -will skip already-processed page IDs. -""", formatter_class=argparse.RawDescriptionHelpFormatter) -parser.parse_args() - -def getInputPageIds(): - pageIds: set[int] = set() - dbCon = sqlite3.connect('../data.db') - dbCur = dbCon.cursor() - for (pageId,) in dbCur.execute('SELECT id from wiki_ids'): - pageIds.add(pageId) - dbCon.close() - return pageIds -dumpFile = 'enwiki-20220501-pages-articles-multistream.xml.bz2' -indexDb = 'dumpIndex.db' -imgDb = 'imgData.db' # The database to create -idLineRegex = re.compile(r'(.*)') -imageLineRegex = re.compile(r'.*\| *image *= *([^|]*)') -bracketImageRegex = re.compile(r'\[\[(File:[^|]*).*]]') -imageNameRegex = re.compile(r'.*\.(jpg|jpeg|png|gif|tiff|tif)', flags=re.IGNORECASE) -cssImgCropRegex = re.compile(r'{{css image crop\|image *= *(.*)', flags=re.IGNORECASE) - -print('Getting input page-ids') -pageIds = getInputPageIds() -print(f'Found {len(pageIds)}') - -print('Opening databases') -indexDbCon = sqlite3.connect(indexDb) -indexDbCur = indexDbCon.cursor() -imgDbCon = sqlite3.connect(imgDb) -imgDbCur = imgDbCon.cursor() -print('Checking tables') -if imgDbCur.execute('SELECT name FROM sqlite_master WHERE type="table" AND name="page_imgs"').fetchone() is None: - # Create tables if not present - imgDbCur.execute('CREATE TABLE page_imgs (page_id INT PRIMARY KEY, img_name TEXT)') # img_name may be NULL - imgDbCur.execute('CREATE INDEX page_imgs_idx ON page_imgs(img_name)') -else: - # Check for already-processed page IDs - numSkipped = 0 - for (pid,) in imgDbCur.execute('SELECT page_id FROM page_imgs'): - if pid in pageIds: - pageIds.remove(pid) - numSkipped += 1 - else: - print(f'WARNING: Found 
already-processed page ID {pid} which was not in input set') - print(f'Will skip {numSkipped} already-processed page IDs') - -print('Getting dump-file offsets') -offsetToPageids: dict[int, list[int]] = {} -offsetToEnd: dict[int, int] = {} # Maps chunk-start offsets to their chunk-end offsets -iterNum = 0 -for pageId in pageIds: - iterNum += 1 - if iterNum % 1e4 == 0: - print(f'At iteration {iterNum}') - # - query = 'SELECT offset, next_offset FROM offsets WHERE id = ?' - row: tuple[int, int] | None = indexDbCur.execute(query, (pageId,)).fetchone() - if row is None: - print(f'WARNING: Page ID {pageId} not found') - continue - chunkOffset, endOffset = row - offsetToEnd[chunkOffset] = endOffset - if chunkOffset not in offsetToPageids: - offsetToPageids[chunkOffset] = [] - offsetToPageids[chunkOffset].append(pageId) -print(f'Found {len(offsetToEnd)} chunks to check') - -print('Iterating through chunks in dump file') -def getImageName(content: list[str]) -> str | None: - """ Given an array of text-content lines, tries to return an infoxbox image name, or None """ - # Doesn't try and find images in outside-infobox [[File:...]] and sections - for line in content: - match = imageLineRegex.match(line) - if match is not None: - imageName = match.group(1).strip() - if imageName == '': - return None - imageName = html.unescape(imageName) - # Account for {{... 
- if imageName.startswith('{'): - match = cssImgCropRegex.match(imageName) - if match is None: - return None - imageName = match.group(1) - # Account for [[File:...|...]] - if imageName.startswith('['): - match = bracketImageRegex.match(imageName) - if match is None: - return None - imageName = match.group(1) - # Account for |<[^([^<]*|[^<]*<[^<]+>[^<]*)|<[^<]+$') + # Recognises a self-closing HTML tag, a tag with 0 children, tag with 1 child with 0 children, or unclosed tag +CONVERT_TEMPLATE_REGEX = re.compile(r'{{convert\|(\d[^|]*)\|(?:(to|-)\|(\d[^|]*)\|)?([a-z][^|}]*)[^}]*}}') +def convertTemplateReplace(match): + """ Used in regex-substitution with CONVERT_TEMPLATE_REGEX """ + if match.group(2) is None: + return f'{match.group(1)} {match.group(4)}' + else: + return f'{match.group(1)} {match.group(2)} {match.group(3)} {match.group(4)}' +PARENS_GROUP_REGEX = re.compile(r' \([^()]*\)') +LEFTOVER_BRACE_REGEX = re.compile(r'(?:{\||{{).*') + +def genData(dumpFile: str, dbFile: str) -> None: + print('Creating database') + if os.path.exists(dbFile): + raise Exception(f'ERROR: Existing {dbFile}') + dbCon = sqlite3.connect(dbFile) + dbCur = dbCon.cursor() + dbCur.execute('CREATE TABLE pages (id INT PRIMARY KEY, title TEXT UNIQUE)') + dbCur.execute('CREATE INDEX pages_title_idx ON pages(title COLLATE NOCASE)') + dbCur.execute('CREATE TABLE redirects (id INT PRIMARY KEY, target TEXT)') + dbCur.execute('CREATE INDEX redirects_idx ON redirects(target)') + dbCur.execute('CREATE TABLE descs (id INT PRIMARY KEY, desc TEXT)') + # + print('Iterating through dump file') + with bz2.open(dumpFile, mode='rt') as file: + for pageNum, page in enumerate(mwxml.Dump.from_file(file), 1): + if pageNum % 1e4 == 0: + print(f'At page {pageNum}') + # Parse page + if page.namespace == 0: + try: + dbCur.execute('INSERT INTO pages VALUES (?, ?)', (page.id, convertTitle(page.title))) + except sqlite3.IntegrityError as e: + # Accounts for certain pages that have the same title + print(f'Failed to 
add page with title "{page.title}": {e}', file=sys.stderr) + continue + if page.redirect is not None: + dbCur.execute('INSERT INTO redirects VALUES (?, ?)', (page.id, convertTitle(page.redirect))) + else: + revision = next(page) + desc = parseDesc(revision.text) + if desc is not None: + dbCur.execute('INSERT INTO descs VALUES (?, ?)', (page.id, desc)) + # + print('Closing database') + dbCon.commit() + dbCon.close() +def parseDesc(text: str) -> str | None: + # Find first matching line outside {{...}}, [[...]], and block-html-comment constructs, + # and then accumulate lines until a blank one. + # Some cases not accounted for include: disambiguation pages, abstracts with sentences split-across-lines, + # nested embedded html, 'content significant' embedded-html, markup not removable with mwparsefromhell, + lines: list[str] = [] + openBraceCount = 0 + openBracketCount = 0 + inComment = False + skip = False + for line in text.splitlines(): + line = line.strip() + if not lines: + if line: + if openBraceCount > 0 or line[0] == '{': + openBraceCount += line.count('{') + openBraceCount -= line.count('}') + skip = True + if openBracketCount > 0 or line[0] == '[': + openBracketCount += line.count('[') + openBracketCount -= line.count(']') + skip = True + if inComment or line.find('') != -1: + if inComment: + inComment = False + skip = True + else: + inComment = True + skip = True + if skip: + skip = False + continue + if line[-1] == ':': # Seems to help avoid disambiguation pages + return None + if DESC_LINE_REGEX.match(line) is not None: + lines.append(line) + else: + if not line: + return removeMarkup(' '.join(lines)) + lines.append(line) + if lines: + return removeMarkup(' '.join(lines)) + return None +def removeMarkup(content: str) -> str: + content = EMBEDDED_HTML_REGEX.sub('', content) + content = CONVERT_TEMPLATE_REGEX.sub(convertTemplateReplace, content) + content = mwparserfromhell.parse(content).strip_code() # Remove wikitext markup + content = 
PARENS_GROUP_REGEX.sub('', content) + content = LEFTOVER_BRACE_REGEX.sub('', content) + return content +def convertTitle(title: str) -> str: + return html.unescape(title).replace('_', ' ') + +if __name__ == '__main__': + import argparse + parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) + parser.parse_args() + # + genData(DUMP_FILE, DB_FILE) diff --git a/backend/tol_data/enwiki/gen_dump_index_db.py b/backend/tol_data/enwiki/gen_dump_index_db.py new file mode 100755 index 0000000..5f21c9b --- /dev/null +++ b/backend/tol_data/enwiki/gen_dump_index_db.py @@ -0,0 +1,60 @@ +#!/usr/bin/python3 + +""" +Adds data from the wiki dump index-file into a database +""" +import sys, os, re +import bz2 +import sqlite3 + +INDEX_FILE = 'enwiki-20220501-pages-articles-multistream-index.txt.bz2' # Had about 22e6 lines +DB_FILE = 'dumpIndex.db' + +def genData(indexFile: str, dbFile: str) -> None: + """ Reads the index file and creates the db """ + if os.path.exists(dbFile): + raise Exception(f'ERROR: Existing {dbFile}') + print('Creating database') + dbCon = sqlite3.connect(dbFile) + dbCur = dbCon.cursor() + dbCur.execute('CREATE TABLE offsets (title TEXT PRIMARY KEY, id INT UNIQUE, offset INT, next_offset INT)') + print('Iterating through index file') + lineRegex = re.compile(r'([^:]+):([^:]+):(.*)') + lastOffset = 0 + lineNum = 0 + entriesToAdd: list[tuple[str, str]] = [] + with bz2.open(indexFile, mode='rt') as file: + for line in file: + lineNum += 1 + if lineNum % 1e5 == 0: + print(f'At line {lineNum}') + # + match = lineRegex.fullmatch(line.rstrip()) + assert match is not None + offsetStr, pageId, title = match.group(1,2,3) + offset = int(offsetStr) + if offset > lastOffset: + for t, p in entriesToAdd: + try: + dbCur.execute('INSERT INTO offsets VALUES (?, ?, ?, ?)', (t, int(p), lastOffset, offset)) + except sqlite3.IntegrityError as e: + # Accounts for certain entries in the file that have the same title + print(f'Failed 
on title "{t}": {e}', file=sys.stderr) + entriesToAdd = [] + lastOffset = offset + entriesToAdd.append((title, pageId)) + for title, pageId in entriesToAdd: + try: + dbCur.execute('INSERT INTO offsets VALUES (?, ?, ?, ?)', (title, int(pageId), lastOffset, -1)) + except sqlite3.IntegrityError as e: + print(f'Failed on title "{t}": {e}', file=sys.stderr) + print('Closing database') + dbCon.commit() + dbCon.close() + +if __name__ == '__main__': + import argparse + parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) + parser.parse_args() + # + genData(INDEX_FILE, DB_FILE) diff --git a/backend/tol_data/enwiki/gen_img_data.py b/backend/tol_data/enwiki/gen_img_data.py new file mode 100755 index 0000000..d4696f0 --- /dev/null +++ b/backend/tol_data/enwiki/gen_img_data.py @@ -0,0 +1,193 @@ +#!/usr/bin/python3 + +""" +For some set of page IDs, looks up their content in the wiki dump, +and tries to parse infobox image names, storing them into a database. + +The program can be re-run with an updated set of page IDs, and +will skip already-processed page IDs. 
def genData(pageIds: set[int], dumpFile: str, indexDb: str, imgDb: str) -> None:
	""" For each page ID, locate its page text in the multistream wiki dump and
	record its parsed infobox image name (possibly NULL) in imgDb's page_imgs table.

	pageIds is mutated: IDs already present in imgDb are removed, so the script
	can be re-run with an updated ID set and skip already-processed pages.
	indexDb must hold the 'offsets' table produced by gen_dump_index_db.py,
	mapping page IDs to their bz2 chunk's start and end byte offsets.

	NOTE(review): several string literals below appear to have had their
	angle-bracket content stripped in this rendering (empty '' literals where
	'<page>' / '</text>'-style markers are clearly intended, and one fused,
	syntactically broken line) -- verify each against the original file.
	"""
	print('Opening databases')
	indexDbCon = sqlite3.connect(indexDb)
	indexDbCur = indexDbCon.cursor()
	imgDbCon = sqlite3.connect(imgDb)
	imgDbCur = imgDbCon.cursor()
	print('Checking tables')
	if imgDbCur.execute('SELECT name FROM sqlite_master WHERE type="table" AND name="page_imgs"').fetchone() is None:
		# Create tables if not present
		imgDbCur.execute('CREATE TABLE page_imgs (page_id INT PRIMARY KEY, img_name TEXT)') # img_name may be NULL
		imgDbCur.execute('CREATE INDEX page_imgs_idx ON page_imgs(img_name)')
	else:
		# Check for already-processed page IDs, and drop them from the input set
		numSkipped = 0
		for (pid,) in imgDbCur.execute('SELECT page_id FROM page_imgs'):
			if pid in pageIds:
				pageIds.remove(pid)
				numSkipped += 1
			else:
				print(f'Found already-processed page ID {pid} which was not in input set')
		print(f'Will skip {numSkipped} already-processed page IDs')
	#
	print('Getting dump-file offsets')
	# Group the requested page IDs by the bz2 chunk that contains them, so each
	# chunk is read and decompressed only once
	offsetToPageids: dict[int, list[int]] = {}
	offsetToEnd: dict[int, int] = {} # Maps chunk-start offsets to their chunk-end offsets
	iterNum = 0
	for pageId in pageIds:
		iterNum += 1
		if iterNum % 1e4 == 0:
			print(f'At iteration {iterNum}')
		#
		query = 'SELECT offset, next_offset FROM offsets WHERE id = ?'
		row: tuple[int, int] | None = indexDbCur.execute(query, (pageId,)).fetchone()
		if row is None:
			print(f'WARNING: Page ID {pageId} not found')
			continue
		chunkOffset, endOffset = row
		offsetToEnd[chunkOffset] = endOffset
		if chunkOffset not in offsetToPageids:
			offsetToPageids[chunkOffset] = []
		offsetToPageids[chunkOffset].append(pageId)
	print(f'Found {len(offsetToEnd)} chunks to check')
	#
	print('Iterating through chunks in dump file')
	with open(dumpFile, mode='rb') as file:
		iterNum = 0
		for pageOffset, endOffset in offsetToEnd.items():
			iterNum += 1
			if iterNum % 100 == 0:
				print(f'At iteration {iterNum}')
			#
			chunkPageIds = offsetToPageids[pageOffset]
			# Jump to chunk; an endOffset of -1 means 'last chunk', so read to EOF
			file.seek(pageOffset)
			compressedData = file.read(None if endOffset == -1 else endOffset - pageOffset)
			# Each multistream chunk is an independent bz2 stream, so a fresh
			# decompressor is used per chunk
			data = bz2.BZ2Decompressor().decompress(compressedData).decode()
			# Look in chunk for pages
			lines = data.splitlines()
			lineIdx = 0
			while lineIdx < len(lines):
				# Look for a page-start marker line
				# NOTE(review): the compared literal is empty here -- presumably '<page>'; confirm
				if lines[lineIdx].lstrip() != '':
					lineIdx += 1
					continue
				# Check page id
				# NOTE(review): skipping 3 lines apparently jumps past the title/ns lines to
				# the id line -- relies on the dump's fixed per-page layout; confirm
				lineIdx += 3
				idLine = lines[lineIdx].lstrip()
				match = ID_LINE_REGEX.fullmatch(idLine)
				if match is None or int(match.group(1)) not in chunkPageIds:
					lineIdx += 1
					continue
				pageId = int(match.group(1))
				lineIdx += 1
				# Look for the page's text element
				foundText = False
				while lineIdx < len(lines):
					# NOTE(review): the next line is syntactically broken in this rendering --
					# the span between the startswith() literal and a later find() call was
					# stripped; it presumably also set foundText and initialized 'content'
					# with the remainder of the text-start line; restore from the original
					if not lines[lineIdx].lstrip().startswith('') + 1:])
					lineIdx += 1
					foundTextEnd = False
					# Collect content lines until the text-end marker
					while lineIdx < len(lines):
						line = lines[lineIdx]
						# NOTE(review): empty literal -- presumably the '</text>'-style end marker
						if not line.endswith(''):
							content.append(line)
							lineIdx += 1
							continue
						foundTextEnd = True
						content.append(line[:line.rfind('')])
						# Look for image-filename in the collected page text
						imageName = getImageName(content)
						imgDbCur.execute('INSERT into page_imgs VALUES (?, ?)', (pageId, imageName))
						break
					if not foundTextEnd:
						print(f'WARNING: Did not find for page id {pageId}')
					break
				if not foundText:
					print(f'WARNING: Did not find for page id {pageId}')
	#
	print('Closing databases')
	indexDbCon.close()
	imgDbCon.commit()
	imgDbCon.close()