author     Terry Truong <terry06890@gmail.com>    2023-01-23 18:00:43 +1100
committer  Terry Truong <terry06890@gmail.com>    2023-01-23 18:01:13 +1100
commit     94a8ad9b067e5a2c442ce47ce72d1a53eb444160 (patch)
tree       2056373ee56b8b2f8269ac3e94d40f8f0e6eec0d /backend
parent     796c4e5660b1006575b8f2af9d99e2ce592c767a (diff)
Clean up some docs and naming inconsistencies
Diffstat (limited to 'backend')
-rw-r--r--  backend/README.md                                      | 10
-rwxr-xr-x  backend/server.py                                      |  8
-rw-r--r--  backend/tests/wikidata/test_gen_taxon_src_data.py      | 24
-rwxr-xr-x  backend/tilo.py                                        | 18
-rw-r--r--  backend/tol_data/README.md                             |  6
-rw-r--r--  backend/tol_data/enwiki/README.md                      | 22
-rwxr-xr-x  backend/tol_data/enwiki/download_img_license_info.py   |  3
-rwxr-xr-x  backend/tol_data/enwiki/download_imgs.py               |  1
-rwxr-xr-x  backend/tol_data/enwiki/gen_desc_data.py               |  4
-rwxr-xr-x  backend/tol_data/enwiki/gen_dump_index_db.py           |  2
-rwxr-xr-x  backend/tol_data/enwiki/gen_img_data.py                |  2
-rwxr-xr-x  backend/tol_data/enwiki/gen_pageview_data.py           |  3
-rwxr-xr-x  backend/tol_data/enwiki/lookup_page.py                 |  2
-rwxr-xr-x  backend/tol_data/gen_imgs.py                           |  2
-rwxr-xr-x  backend/tol_data/gen_mapping_data.py                   |  2
-rw-r--r--  backend/tol_data/picked_imgs/README.md                 |  2
-rw-r--r--  backend/tol_data/wikidata/README.md                    |  4
-rwxr-xr-x  backend/tol_data/wikidata/gen_taxon_src_data.py        |  6
18 files changed, 57 insertions, 64 deletions
diff --git a/backend/README.md b/backend/README.md
index 0f3f332..06cbbec 100644
--- a/backend/README.md
+++ b/backend/README.md
@@ -1,9 +1,9 @@
# Files
-- **tol_data/**: Holds scripts for generating the tree-of-life database
-- **tilo.py**: WSGI script that serves data from the tree-of-life database. <br>
- Note: WSGI is used instead of CGI to avoid starting a new process for each request.
-- **server.py**: Basic dev server that serves the WSGI script and image files
-- **tests/**: Holds unit testing scripts.<br>
+- `tol_data/`: Holds scripts for generating the tree-of-life database and images
+- `tilo.py`: WSGI script that serves data from the tree-of-life database <br>
+ Note: WSGI is used instead of CGI to avoid starting a new process for each request
+- `server.py`: Basic dev server that serves the WSGI script and image files
+- `tests/`: Holds unit testing scripts. <br>
Running all tests: `python -m unittest discover -s tests` <br>
Running a particular test: `python -m unittest tests/test_script1.py` <br>
Getting code coverage info (uses python package 'coverage'): <br>
diff --git a/backend/server.py b/backend/server.py
index 5b4d050..c953a9f 100755
--- a/backend/server.py
+++ b/backend/server.py
@@ -1,5 +1,9 @@
#!/usr/bin/python3
+"""
+Runs a basic dev server that serves a WSGI script and image files
+"""
+
from typing import Iterable
import os
from wsgiref import simple_server, util
@@ -7,9 +11,7 @@ import mimetypes
from tilo import application
import argparse
-parser = argparse.ArgumentParser(description="""
-Runs a basic dev server that serves a WSGI script and image files
-""")
+parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.parse_args()
def wrappingApp(environ: dict[str, str], start_response) -> Iterable[bytes]:
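This hunk replaces a hard-coded argparse description with the module docstring. A minimal self-contained sketch of the pattern (the docstring text mirrors the one added above):

```python
#!/usr/bin/python3
"""
Runs a basic dev server that serves a WSGI script and image files
"""
import argparse

# Reusing the module docstring keeps --help and the file's documentation in sync;
# RawDescriptionHelpFormatter preserves the docstring's line breaks verbatim.
parser = argparse.ArgumentParser(
    description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.parse_args()
```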
diff --git a/backend/tests/wikidata/test_gen_taxon_src_data.py b/backend/tests/wikidata/test_gen_taxon_src_data.py
index 1f886b3..9e66fe7 100644
--- a/backend/tests/wikidata/test_gen_taxon_src_data.py
+++ b/backend/tests/wikidata/test_gen_taxon_src_data.py
@@ -92,18 +92,18 @@ class TestGenData(unittest.TestCase):
('dog', 'endangered'),
}
def test_wikiItems(self):
- srcMap, iucnMap = runGenData(self.testWikiItems, False, 1)
- self.assertEqual(srcMap, self.expectedSrcRows)
- self.assertEqual(iucnMap, self.expectedIucnRows)
+ srcRows, iucnRows = runGenData(self.testWikiItems, False, 1)
+ self.assertEqual(srcRows, self.expectedSrcRows)
+ self.assertEqual(iucnRows, self.expectedIucnRows)
def test_empty_dump(self):
- srcMap, iucnMap = runGenData([{}], False, 1)
- self.assertEqual(srcMap, set())
- self.assertEqual(iucnMap, set())
+ srcRows, iucnRows = runGenData([{}], False, 1)
+ self.assertEqual(srcRows, set())
+ self.assertEqual(iucnRows, set())
def test_multiprocessing(self):
- srcMap, iucnMap = runGenData(self.testWikiItems, False, 4)
- self.assertEqual(srcMap, self.expectedSrcRows)
- self.assertEqual(iucnMap, self.expectedIucnRows)
+ srcRows, iucnRows = runGenData(self.testWikiItems, False, 4)
+ self.assertEqual(srcRows, self.expectedSrcRows)
+ self.assertEqual(iucnRows, self.expectedIucnRows)
def test_existing_offsets(self):
- srcMap, iucnMap = runGenData(self.testWikiItems, True, 3)
- self.assertEqual(srcMap, self.expectedSrcRows)
- self.assertEqual(iucnMap, self.expectedIucnRows)
+ srcRows, iucnRows = runGenData(self.testWikiItems, True, 3)
+ self.assertEqual(srcRows, self.expectedSrcRows)
+ self.assertEqual(iucnRows, self.expectedIucnRows)
diff --git a/backend/tilo.py b/backend/tilo.py
index dfefab1..21b5a7f 100755
--- a/backend/tilo.py
+++ b/backend/tilo.py
@@ -1,11 +1,4 @@
-#!/usr/bin/python3
-
-from typing import Iterable, cast
-import sys, re
-import urllib.parse, sqlite3
-import gzip, jsonpickle
-
-HELP_INFO = """
+"""
WSGI script that serves tree-of-life data, in JSON form.
Expected HTTP query parameters:
@@ -23,10 +16,11 @@ Expected HTTP query parameters:
weakly-trimmed, images-only, and picked-nodes trees. The default
is 'images'.
"""
-if __name__ == '__main__':
- import argparse
- parser = argparse.ArgumentParser(description=HELP_INFO, formatter_class=argparse.RawDescriptionHelpFormatter)
- parser.parse_args()
+
+from typing import Iterable, cast
+import sys, re
+import urllib.parse, sqlite3
+import gzip, jsonpickle
DB_FILE = 'tol_data/data.db'
DEFAULT_SUGG_LIM = 5
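With the CLI entry point removed, tilo.py is import-only and exposes a WSGI callable. A minimal sketch of that callable's shape, for orientation (the handler body here is illustrative, not tilo.py's actual logic):

```python
from typing import Iterable
import json
import urllib.parse

def application(environ: dict, start_response) -> Iterable[bytes]:
    # Parse the HTTP query parameters described in the docstring
    params = urllib.parse.parse_qs(environ.get('QUERY_STRING', ''))
    body = json.dumps({'params': params}).encode()
    start_response('200 OK', [
        ('Content-Type', 'application/json'),
        ('Content-Length', str(len(body))),
    ])
    return [body]
```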
diff --git a/backend/tol_data/README.md b/backend/tol_data/README.md
index a21418b..f1bbf6b 100644
--- a/backend/tol_data/README.md
+++ b/backend/tol_data/README.md
@@ -3,7 +3,7 @@ This directory holds files used to generate the tree-of-life database data.db.
# Database Tables
## Tree Structure
- `nodes` <br>
- Format : `name TEXT PRIMARY KEY, id TEXT UNIQUE, tips INT` <br>
+ Format: `name TEXT PRIMARY KEY, id TEXT UNIQUE, tips INT` <br>
Represents a tree-of-life node. `tips` holds the number of no-child descendants
- `edges` <br>
Format: `parent TEXT, child TEXT, p_support INT, PRIMARY KEY (parent, child)` <br>
@@ -59,10 +59,8 @@ takes several days, and occupies over 200 GB.
## Environment
Some of the scripts use third-party packages:
- `indexed_bzip2`: For parallelised bzip2 processing.
-- `jsonpickle`: For encoding class objects as JSON.
- `requests`: For downloading data.
-- `PIL`: For image processing.
-- `tkinter`: For providing a basic GUI to review images.
+- `Pillow`: For image processing.
- `mwxml`, `mwparserfromhell`: For parsing Wikipedia dumps.
## Generate Tree Structure Data
diff --git a/backend/tol_data/enwiki/README.md b/backend/tol_data/enwiki/README.md
index ba1de33..6f27d7f 100644
--- a/backend/tol_data/enwiki/README.md
+++ b/backend/tol_data/enwiki/README.md
@@ -14,12 +14,12 @@ This directory holds files obtained/derived from [English Wikipedia](https://en.
# Dump-Index Files
- `gen_dump_index_db.py` <br>
Creates a database version of the enwiki-dump index file.
-- `dumpIndex.db` <br>
+- `dump_index.db` <br>
Generated by `gen_dump_index_db.py`. <br>
Tables: <br>
- `offsets`: `title TEXT PRIMARY KEY, id INT UNIQUE, offset INT, next_offset INT`
-# Description Database Files
+# Description Files
- `gen_desc_data.py` <br>
Reads through pages in the dump file, and adds short-description info to a database.
- `desc_data.db` <br>
@@ -29,20 +29,20 @@ This directory holds files obtained/derived from [English Wikipedia](https://en.
- `redirects`: `id INT PRIMARY KEY, target TEXT`
- `descs`: `id INT PRIMARY KEY, desc TEXT`
-# Image Database Files
+# Image Files
- `gen_img_data.py` <br>
- Used to find infobox image names for page IDs, storing them into a database.
-- `downloadImgLicenseInfo.py` <br>
- Used to download licensing metadata for image names, via wikipedia's online API, storing them into a database.
+ Used to find infobox image names for page IDs, and store them into a database.
+- `download_img_license_info.py` <br>
+ Used to download licensing metadata for image names, via wikipedia's online API, and store them into a database.
- `img_data.db` <br>
- Used to hold metadata about infobox images for a set of pageIDs.
+ Used to hold metadata about infobox images for a set of page IDs.
Generated using `get_enwiki_img_data.py` and `download_img_license_info.py`. <br>
Tables: <br>
- `page_imgs`: `page_id INT PRIMARY KEY, img_name TEXT` <br>
- `img_name` may be null, which means 'none found', and is used to avoid re-processing page-ids.
+ `img_name` may be null, which means 'none found', and is used to avoid re-processing page IDs.
- `imgs`: `name TEXT PRIMARY KEY, license TEXT, artist TEXT, credit TEXT, restrictions TEXT, url TEXT` <br>
Might lack some matches for `img_name` in `page_imgs`, due to licensing info unavailability.
-- `downloadImgs.py` <br>
+- `download_imgs.py` <br>
Used to download image files into imgs/.
# Page View Files
@@ -51,7 +51,7 @@ This directory holds files obtained/derived from [English Wikipedia](https://en.
Obtained via <https://dumps.wikimedia.org/other/pageview_complete/monthly/>.
Some format info was available from <https://dumps.wikimedia.org/other/pageview_complete/readme.html>.
- `gen_pageview_data.py` <br>
- Reads pageview/*, and creates a database holding average monthly pageview counts.
+ Reads pageview/* and `dump_index.db`, and creates a database holding average monthly pageview counts.
- `pageview_data.db` <br>
Generated using `gen_pageview_data.py`. <br>
Tables: <br>
@@ -60,4 +60,4 @@ This directory holds files obtained/derived from [English Wikipedia](https://en.
# Other Files
- `lookup_page.py` <br>
Running `lookup_page.py title1` looks in the dump for a page with a given title,
- and prints the contents to stdout. Uses dumpIndex.db.
+ and prints the contents to stdout. Uses dump_index.db.
diff --git a/backend/tol_data/enwiki/download_img_license_info.py b/backend/tol_data/enwiki/download_img_license_info.py
index 0a809ac..17e15b4 100755
--- a/backend/tol_data/enwiki/download_img_license_info.py
+++ b/backend/tol_data/enwiki/download_img_license_info.py
@@ -89,7 +89,7 @@ def downloadInfo(imgDb: str) -> None:
continue
# Parse response-object
if 'query' not in responseObj or 'pages' not in responseObj['query']:
- print('WARNING: Response object for doesn\'t have page data')
+ print('WARNING: Response object doesn\'t have page data')
print('\tImage batch: ' + '|'.join(imgBatch))
if 'error' in responseObj:
errorCode = responseObj['error']['code']
@@ -138,7 +138,6 @@ def downloadInfo(imgDb: str) -> None:
credit = html.unescape(credit)
credit = urllib.parse.unquote(credit)
# Add to db
- print((title, license, artist, credit, restrictions, url))
dbCur.execute('INSERT INTO imgs VALUES (?, ?, ?, ?, ?, ?)',
(title, license, artist, credit, restrictions, url))
#
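download_img_license_info.py queries Wikipedia's API in batches of image names. A sketch of the kind of request involved, assuming the standard MediaWiki imageinfo endpoint with extmetadata (the batching helper and field choices are illustrative, not the script's actual code):

```python
import requests

API_URL = 'https://en.wikipedia.org/w/api.php'

def fetchLicenseInfo(imgBatch: list[str], userAgent: str) -> dict:
    # One query can cover multiple titles, joined with '|'
    params = {
        'action': 'query',
        'titles': '|'.join(f'File:{name}' for name in imgBatch),
        'prop': 'imageinfo',
        'iiprop': 'extmetadata|url',
        'format': 'json',
    }
    response = requests.get(API_URL, params=params,
        headers={'user-agent': userAgent}, timeout=30)
    response.raise_for_status()
    return response.json()
```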
diff --git a/backend/tol_data/enwiki/download_imgs.py b/backend/tol_data/enwiki/download_imgs.py
index ba874e1..c6a1c21 100755
--- a/backend/tol_data/enwiki/download_imgs.py
+++ b/backend/tol_data/enwiki/download_imgs.py
@@ -75,7 +75,6 @@ def downloadImgs(imgDb: str, outDir: str, timeout: int) -> None:
print(f'WARNING: No filename extension found in URL {url}')
continue
outFile = os.path.join(outDir, f'{pageId}{extension}')
- print(outFile)
headers = {
'user-agent': USER_AGENT,
'accept-encoding': 'gzip',
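The remaining context shows per-image request headers being assembled. A sketch of the download step that plausibly follows, assuming requests (listed in the environment section) with streaming to disk:

```python
import requests

def downloadImg(url: str, outFile: str, headers: dict[str, str], timeout: int) -> bool:
    """ Downloads one image to outFile, returning False on HTTP failure """
    with requests.get(url, headers=headers, timeout=timeout, stream=True) as resp:
        if resp.status_code != 200:
            return False
        with open(outFile, 'wb') as f:
            # Stream in chunks rather than holding the whole image in memory
            for chunk in resp.iter_content(chunk_size=64 * 1024):
                f.write(chunk)
    return True
```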
diff --git a/backend/tol_data/enwiki/gen_desc_data.py b/backend/tol_data/enwiki/gen_desc_data.py
index 0dca16b..b3fde52 100755
--- a/backend/tol_data/enwiki/gen_desc_data.py
+++ b/backend/tol_data/enwiki/gen_desc_data.py
@@ -1,8 +1,8 @@
#!/usr/bin/python3
"""
-Reads through the wiki dump, and attempts to parse short-descriptions,
-and add them to a database
+Reads through the wiki dump, attempts to parse short-descriptions,
+and adds them to a database
"""
# In testing, this script took over 10 hours to run, and generated about 5GB
diff --git a/backend/tol_data/enwiki/gen_dump_index_db.py b/backend/tol_data/enwiki/gen_dump_index_db.py
index 5f21c9b..5778680 100755
--- a/backend/tol_data/enwiki/gen_dump_index_db.py
+++ b/backend/tol_data/enwiki/gen_dump_index_db.py
@@ -8,7 +8,7 @@ import bz2
import sqlite3
INDEX_FILE = 'enwiki-20220501-pages-articles-multistream-index.txt.bz2' # Had about 22e6 lines
-DB_FILE = 'dumpIndex.db'
+DB_FILE = 'dump_index.db'
def genData(indexFile: str, dbFile: str) -> None:
""" Reads the index file and creates the db """
diff --git a/backend/tol_data/enwiki/gen_img_data.py b/backend/tol_data/enwiki/gen_img_data.py
index d4696f0..040f223 100755
--- a/backend/tol_data/enwiki/gen_img_data.py
+++ b/backend/tol_data/enwiki/gen_img_data.py
@@ -13,7 +13,7 @@ import os, bz2, html, urllib.parse
import sqlite3
DUMP_FILE = 'enwiki-20220501-pages-articles-multistream.xml.bz2'
-INDEX_DB = 'dumpIndex.db'
+INDEX_DB = 'dump_index.db'
IMG_DB = 'img_data.db' # The database to create
DB_FILE = os.path.join('..', 'data.db')
#
diff --git a/backend/tol_data/enwiki/gen_pageview_data.py b/backend/tol_data/enwiki/gen_pageview_data.py
index ce3b674..8aee1cc 100755
--- a/backend/tol_data/enwiki/gen_pageview_data.py
+++ b/backend/tol_data/enwiki/gen_pageview_data.py
@@ -12,7 +12,7 @@ from collections import defaultdict
import bz2, sqlite3
PAGEVIEW_FILES = glob.glob('./pageviews/pageviews-*-user.bz2')
-DUMP_INDEX_DB = 'dumpIndex.db'
+DUMP_INDEX_DB = 'dump_index.db'
DB_FILE = 'pageview_data.db'
def genData(pageviewFiles: list[str], dumpIndexDb: str, dbFile: str) -> None:
@@ -42,6 +42,7 @@ def genData(pageviewFiles: list[str], dumpIndexDb: str, dbFile: str) -> None:
if namespaceRegex.match(title) is not None:
continue
# Update map
+ title = title.replace('_', ' ')
titleToViews[title] += viewCount
print(f'Found {len(titleToViews)} titles')
#
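The added `title.replace('_', ' ')` matters because pageview files spell titles with underscores while the dump index stores them with spaces; without it, lookups against dump_index.db would silently miss. A minimal sketch of the accumulation step (the field positions in pageview-complete lines are an assumption here):

```python
from collections import defaultdict

titleToViews: defaultdict[str, int] = defaultdict(int)

def addPageviewLine(line: str) -> None:
    fields = line.split(' ')
    title, viewCount = fields[1], int(fields[4])  # assumed positions: title, daily total
    title = title.replace('_', ' ')  # match the dump index's spelling
    titleToViews[title] += viewCount
```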
diff --git a/backend/tol_data/enwiki/lookup_page.py b/backend/tol_data/enwiki/lookup_page.py
index 8ef1229..f744818 100755
--- a/backend/tol_data/enwiki/lookup_page.py
+++ b/backend/tol_data/enwiki/lookup_page.py
@@ -10,7 +10,7 @@ import bz2
import sqlite3
DUMP_FILE = 'enwiki-20220501-pages-articles-multistream.xml.bz2'
-INDEX_DB = 'dumpIndex.db'
+INDEX_DB = 'dump_index.db'
def lookupPage(dumpFile: str, indexDb: str, pageTitle: str) -> None:
print('Looking up offset in index db')
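For context, the offsets looked up here exploit the dump's multistream layout: each offset/next_offset pair bounds an independently decompressible bz2 stream of pages. A sketch of that read, with illustrative names:

```python
import bz2

def readPagesChunk(dumpFile: str, offset: int, nextOffset: int) -> str:
    # Each multistream chunk is a self-contained bz2 stream (typically ~100 pages)
    with open(dumpFile, 'rb') as f:
        f.seek(offset)
        compressed = f.read(nextOffset - offset)
    return bz2.decompress(compressed).decode('utf-8')
```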
diff --git a/backend/tol_data/gen_imgs.py b/backend/tol_data/gen_imgs.py
index 6d54e4d..0ba75ec 100755
--- a/backend/tol_data/gen_imgs.py
+++ b/backend/tol_data/gen_imgs.py
@@ -115,7 +115,7 @@ def processPickedImgs(
def processImgs(
imgListFile: str, eolImgDir: str, eolImgDb: str, enwikiImgDb: str,
nodesDone: set[str], imgsDone: set[ImgId], outDir: str, dbCur: sqlite3.Cursor) -> bool:
- """ Converts EOL and enwiki images, and updates db, returning False upon interrupted or failure """
+ """ Converts EOL and enwiki images, and updates db, returning False upon interruption or failure """
eolCon = sqlite3.connect(eolImgDb)
eolCur = eolCon.cursor()
enwikiCon = sqlite3.connect(enwikiImgDb)
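processImgs converts images using Pillow (listed in tol_data/README.md's environment section). A sketch of a typical per-image conversion, with the target size and output format as illustrative assumptions rather than the script's actual settings:

```python
from PIL import Image

def convertImg(srcPath: str, dstPath: str, size: int = 200) -> None:
    with Image.open(srcPath) as img:
        img = img.convert('RGB')     # flatten palette/alpha images for JPEG output
        img.thumbnail((size, size))  # downscale in place, preserving aspect ratio
        img.save(dstPath, 'JPEG', quality=90)
```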
diff --git a/backend/tol_data/gen_mapping_data.py b/backend/tol_data/gen_mapping_data.py
index 95e930b..4373d1d 100755
--- a/backend/tol_data/gen_mapping_data.py
+++ b/backend/tol_data/gen_mapping_data.py
@@ -19,7 +19,7 @@ import gzip, csv, sqlite3
TAXONOMY_FILE = os.path.join('otol', 'taxonomy.tsv')
EOL_IDS_FILE = os.path.join('eol', 'provider_ids.csv.gz')
WIKIDATA_DB = os.path.join('wikidata', 'taxon_srcs.db')
-ENWIKI_DUMP_INDEX_DB = os.path.join('enwiki', 'dumpIndex.db')
+ENWIKI_DUMP_INDEX_DB = os.path.join('enwiki', 'dump_index.db')
PICKED_MAPPINGS = {
'eol': ['picked_eol_ids.txt'],
'enwiki': ['picked_wiki_ids.txt', 'picked_wiki_ids_rough.txt']
diff --git a/backend/tol_data/picked_imgs/README.md b/backend/tol_data/picked_imgs/README.md
index 1edd951..71c13c0 100644
--- a/backend/tol_data/picked_imgs/README.md
+++ b/backend/tol_data/picked_imgs/README.md
@@ -4,7 +4,7 @@ on top of those from EOL and Wikipedia.
Possible Files
==============
- (Image files)
-- img_data.txt <br>
+- `img_data.txt` <br>
Contains lines with the format `filename|url|license|artist|credit`.
The filename should consist of a node name, with an image extension.
Other fields correspond to those in the `images` table (see ../README.md).
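Given the `filename|url|license|artist|credit` format just described, a minimal parsing sketch (assuming no field contains a literal '|'):

```python
def readImgData(path: str) -> list[tuple[str, str, str, str, str]]:
    rows = []
    with open(path) as f:
        for line in f:
            filename, url, license, artist, credit = line.rstrip('\n').split('|')
            rows.append((filename, url, license, artist, credit))
    return rows
```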
diff --git a/backend/tol_data/wikidata/README.md b/backend/tol_data/wikidata/README.md
index 7b3105e..806b315 100644
--- a/backend/tol_data/wikidata/README.md
+++ b/backend/tol_data/wikidata/README.md
@@ -1,4 +1,4 @@
-This directory holds files obtained via [Wikidata](https://www.wikidata.org/).
+This directory holds files obtained/derived from [Wikidata](https://www.wikidata.org/).
# Downloaded Files
- `latest-all.json.bz2` <br>
@@ -10,7 +10,7 @@ This directory holds files obtained via [Wikidata](https://www.wikidata.org/).
Used to generate a database holding taxon information from the dump.
- `offsets.dat` <br>
Holds bzip2 block offsets for the dump. Generated and used by
- genTaxonSrcData.py for parallel processing of the dump.
+ gen_taxon_src_data.py for parallel processing of the dump.
- `taxon_srcs.db` <br>
Generated by `gen_taxon_src_data.py`. <br>
Tables: <br>
diff --git a/backend/tol_data/wikidata/gen_taxon_src_data.py b/backend/tol_data/wikidata/gen_taxon_src_data.py
index 50ed917..1bddb6e 100755
--- a/backend/tol_data/wikidata/gen_taxon_src_data.py
+++ b/backend/tol_data/wikidata/gen_taxon_src_data.py
@@ -50,7 +50,7 @@ IUCN_STATUS_IDS = {
'Q237350': 'extinct species', 'Q3245245': 'data deficient'
}
# For filtering lines before parsing JSON
-LINE_REGEX = re.compile(('"id":(?:"' + '"|"'.join([s for s in TAXON_IDS + TAXON_ALT_IDS]) + '")\D').encode())
+LINE_REGEX = re.compile(('"id":(?:"' + '"|"'.join([s for s in TAXON_IDS + TAXON_ALT_IDS]) + '")').encode())
def genData(wikidataFile: str, offsetsFile: str, dbFile: str, nProcs: int) -> None:
""" Reads the dump and writes source/iucn info to db """
@@ -92,8 +92,8 @@ def genData(wikidataFile: str, offsetsFile: str, dbFile: str, nProcs: int) -> No
with multiprocessing.Pool(processes=nProcs, maxtasksperchild=1) as pool:
for outFilename in pool.map(
readDumpChunkOneParam,
- ((i, wikidataFile, offsetsFile, chunkIdxs[i], chunkIdxs[i+1],
- os.path.join(tempDirName, f'{i}.pickle')) for i in range(nProcs))):
+ [(i, wikidataFile, offsetsFile, chunkIdxs[i], chunkIdxs[i+1],
+ os.path.join(tempDirName, f'{i}.pickle')) for i in range(nProcs)]):
# Get map data from subprocess output file
with open(outFilename, 'rb') as file:
maps = pickle.load(file)
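The chunk indexes handed to the workers above come from saved bzip2 block offsets. A sketch of the load-or-compute step, assuming indexed_bzip2's IndexedBzip2File API with block_offsets() (indexed_bzip2 appears in tol_data/README.md's environment list; the exact method names are an assumption):

```python
import pickle
import indexed_bzip2

def getBlockOffsets(wikidataFile: str, offsetsFile: str) -> dict[int, int]:
    """ Loads saved bzip2 block offsets, or computes and saves them """
    try:
        with open(offsetsFile, 'rb') as f:
            return pickle.load(f)
    except FileNotFoundError:
        with indexed_bzip2.IndexedBzip2File(wikidataFile) as dump:
            offsets = dump.block_offsets()  # requires one full scan of the dump
        with open(offsetsFile, 'wb') as f:
            pickle.dump(offsets, f)
        return offsets
```

Each worker can then restore the offsets on its own file handle (presumably via set_block_offsets()) and seek directly to its chunk without rescanning the dump.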