| author | Terry Truong <terry06890@gmail.com> | 2023-01-23 18:00:43 +1100 |
|---|---|---|
| committer | Terry Truong <terry06890@gmail.com> | 2023-01-23 18:01:13 +1100 |
| commit | 94a8ad9b067e5a2c442ce47ce72d1a53eb444160 | |
| tree | 2056373ee56b8b2f8269ac3e94d40f8f0e6eec0d | /backend/tol_data |
| parent | 796c4e5660b1006575b8f2af9d99e2ce592c767a | |
Clean up some docs and naming inconsistencies
Diffstat (limited to 'backend/tol_data')
| -rw-r--r-- | backend/tol_data/README.md | 6 |
| -rw-r--r-- | backend/tol_data/enwiki/README.md | 22 |
| -rwxr-xr-x | backend/tol_data/enwiki/download_img_license_info.py | 3 |
| -rwxr-xr-x | backend/tol_data/enwiki/download_imgs.py | 1 |
| -rwxr-xr-x | backend/tol_data/enwiki/gen_desc_data.py | 4 |
| -rwxr-xr-x | backend/tol_data/enwiki/gen_dump_index_db.py | 2 |
| -rwxr-xr-x | backend/tol_data/enwiki/gen_img_data.py | 2 |
| -rwxr-xr-x | backend/tol_data/enwiki/gen_pageview_data.py | 3 |
| -rwxr-xr-x | backend/tol_data/enwiki/lookup_page.py | 2 |
| -rwxr-xr-x | backend/tol_data/gen_imgs.py | 2 |
| -rwxr-xr-x | backend/tol_data/gen_mapping_data.py | 2 |
| -rw-r--r-- | backend/tol_data/picked_imgs/README.md | 2 |
| -rw-r--r-- | backend/tol_data/wikidata/README.md | 4 |
| -rwxr-xr-x | backend/tol_data/wikidata/gen_taxon_src_data.py | 6 |
14 files changed, 29 insertions, 32 deletions
```diff
diff --git a/backend/tol_data/README.md b/backend/tol_data/README.md
index a21418b..f1bbf6b 100644
--- a/backend/tol_data/README.md
+++ b/backend/tol_data/README.md
@@ -3,7 +3,7 @@ This directory holds files used to generate the tree-of-life database data.db.
 # Database Tables
 ## Tree Structure
 - `nodes` <br>
-  Format : `name TEXT PRIMARY KEY, id TEXT UNIQUE, tips INT` <br>
+  Format: `name TEXT PRIMARY KEY, id TEXT UNIQUE, tips INT` <br>
   Represents a tree-of-life node. `tips` holds the number of no-child descendants
 - `edges` <br>
   Format: `parent TEXT, child TEXT, p_support INT, PRIMARY KEY (parent, child)` <br>
@@ -59,10 +59,8 @@ takes several days, and occupies over 200 GB.
 ## Environment
 Some of the scripts use third-party packages:
 - `indexed_bzip2`: For parallelised bzip2 processing.
-- `jsonpickle`: For encoding class objects as JSON.
 - `requests`: For downloading data.
-- `PIL`: For image processing.
-- `tkinter`: For providing a basic GUI to review images.
+- `Pillow`: For image processing.
 - `mwxml`, `mwparserfromhell`: For parsing Wikipedia dumps.
 
 ## Generate Tree Structure Data
diff --git a/backend/tol_data/enwiki/README.md b/backend/tol_data/enwiki/README.md
index ba1de33..6f27d7f 100644
--- a/backend/tol_data/enwiki/README.md
+++ b/backend/tol_data/enwiki/README.md
@@ -14,12 +14,12 @@ This directory holds files obtained/derived from [English Wikipedia](https://en.
 # Dump-Index Files
 - `gen_dump_index_db.py` <br>
   Creates a database version of the enwiki-dump index file.
-- `dumpIndex.db` <br>
+- `dump_index.db` <br>
   Generated by `gen_dump_index_db.py`. <br>
   Tables: <br>
   - `offsets`: `title TEXT PRIMARY KEY, id INT UNIQUE, offset INT, next_offset INT`
 
-# Description Database Files
+# Description Files
 - `gen_desc_data.py` <br>
   Reads through pages in the dump file, and adds short-description info to a database.
 - `desc_data.db` <br>
@@ -29,20 +29,20 @@ This directory holds files obtained/derived from [English Wikipedia](https://en.
   Generated using `gen_desc_data.py`. <br>
   Tables: <br>
   - `redirects`: `id INT PRIMARY KEY, target TEXT`
   - `descs`: `id INT PRIMARY KEY, desc TEXT`
-# Image Database Files
+# Image Files
 - `gen_img_data.py` <br>
-  Used to find infobox image names for page IDs, storing them into a database.
-- `downloadImgLicenseInfo.py` <br>
-  Used to download licensing metadata for image names, via wikipedia's online API, storing them into a database.
+  Used to find infobox image names for page IDs, and store them into a database.
+- `download_img_license_info.py` <br>
+  Used to download licensing metadata for image names, via wikipedia's online API, and store them into a database.
 - `img_data.db` <br>
-  Used to hold metadata about infobox images for a set of pageIDs.
+  Used to hold metadata about infobox images for a set of page IDs.
   Generated using `get_enwiki_img_data.py` and `download_img_license_info.py`. <br>
   Tables: <br>
   - `page_imgs`: `page_id INT PRIMAY KEY, img_name TEXT` <br>
-    `img_name` may be null, which means 'none found', and is used to avoid re-processing page-ids.
+    `img_name` may be null, which means 'none found', and is used to avoid re-processing page IDs.
   - `imgs`: `name TEXT PRIMARY KEY, license TEXT, artist TEXT, credit TEXT, restrictions TEXT, url TEXT` <br>
     Might lack some matches for `img_name` in `page_imgs`, due to licensing info unavailability.
-- `downloadImgs.py` <br>
+- `download_imgs.py` <br>
   Used to download image files into imgs/.
 # Page View Files
@@ -51,7 +51,7 @@ This directory holds files obtained/derived from [English Wikipedia](https://en.
   Obtained via <https://dumps.wikimedia.org/other/pageview_complete/monthly/>.
   Some format info was available from <https://dumps.wikimedia.org/other/pageview_complete/readme.html>.
 - `gen_pageview_data.py` <br>
-  Reads pageview/*, and creates a database holding average monthly pageview counts.
+  Reads pageview/* and `dump_index.db`, and creates a database holding average monthly pageview counts.
 - `pageview_data.db` <br>
   Generated using `gen_pageview_data.py`. <br>
   Tables: <br>
@@ -60,4 +60,4 @@ This directory holds files obtained/derived from [English Wikipedia](https://en.
 # Other Files
 - `lookup_page.py` <br>
   Running `lookup_page.py title1` looks in the dump for a page with a given title,
-  and prints the contents to stdout. Uses dumpIndex.db.
+  and prints the contents to stdout. Uses dump_index.db.
diff --git a/backend/tol_data/enwiki/download_img_license_info.py b/backend/tol_data/enwiki/download_img_license_info.py
index 0a809ac..17e15b4 100755
--- a/backend/tol_data/enwiki/download_img_license_info.py
+++ b/backend/tol_data/enwiki/download_img_license_info.py
@@ -89,7 +89,7 @@ def downloadInfo(imgDb: str) -> None:
 			continue
 		# Parse response-object
 		if 'query' not in responseObj or 'pages' not in responseObj['query']:
-			print('WARNING: Response object for doesn\'t have page data')
+			print('WARNING: Response object doesn\'t have page data')
 			print('\tImage batch: ' + '|'.join(imgBatch))
 			if 'error' in responseObj:
 				errorCode = responseObj['error']['code']
@@ -138,7 +138,6 @@ def downloadInfo(imgDb: str) -> None:
 			credit = html.unescape(credit)
 			credit = urllib.parse.unquote(credit)
 		# Add to db
-		print((title, license, artist, credit, restrictions, url))
 		dbCur.execute('INSERT INTO imgs VALUES (?, ?, ?, ?, ?, ?)',
 			(title, license, artist, credit, restrictions, url))
 #
diff --git a/backend/tol_data/enwiki/download_imgs.py b/backend/tol_data/enwiki/download_imgs.py
index ba874e1..c6a1c21 100755
--- a/backend/tol_data/enwiki/download_imgs.py
+++ b/backend/tol_data/enwiki/download_imgs.py
@@ -75,7 +75,6 @@ def downloadImgs(imgDb: str, outDir: str, timeout: int) -> None:
 			print(f'WARNING: No filename extension found in URL {url}')
 			continue
 		outFile = os.path.join(outDir, f'{pageId}{extension}')
-		print(outFile)
 		headers = {
 			'user-agent': USER_AGENT,
 			'accept-encoding': 'gzip',
diff --git a/backend/tol_data/enwiki/gen_desc_data.py b/backend/tol_data/enwiki/gen_desc_data.py
index 0dca16b..b3fde52 100755
--- a/backend/tol_data/enwiki/gen_desc_data.py
+++ b/backend/tol_data/enwiki/gen_desc_data.py
@@ -1,8 +1,8 @@
 #!/usr/bin/python3
 """
-Reads through the wiki dump, and attempts to parse short-descriptions,
-and add them to a database
+Reads through the wiki dump, attempts to parse short-descriptions,
+and adds them to a database
 """
 
 # In testing, this script took over 10 hours to run, and generated about 5GB
 
diff --git a/backend/tol_data/enwiki/gen_dump_index_db.py b/backend/tol_data/enwiki/gen_dump_index_db.py
index 5f21c9b..5778680 100755
--- a/backend/tol_data/enwiki/gen_dump_index_db.py
+++ b/backend/tol_data/enwiki/gen_dump_index_db.py
@@ -8,7 +8,7 @@ import bz2
 import sqlite3
 
 INDEX_FILE = 'enwiki-20220501-pages-articles-multistream-index.txt.bz2' # Had about 22e6 lines
-DB_FILE = 'dumpIndex.db'
+DB_FILE = 'dump_index.db'
 
 def genData(indexFile: str, dbFile: str) -> None:
 	""" Reads the index file and creates the db """
diff --git a/backend/tol_data/enwiki/gen_img_data.py b/backend/tol_data/enwiki/gen_img_data.py
index d4696f0..040f223 100755
--- a/backend/tol_data/enwiki/gen_img_data.py
+++ b/backend/tol_data/enwiki/gen_img_data.py
@@ -13,7 +13,7 @@ import os, bz2, html, urllib.parse
 import sqlite3
 
 DUMP_FILE = 'enwiki-20220501-pages-articles-multistream.xml.bz2'
-INDEX_DB = 'dumpIndex.db'
+INDEX_DB = 'dump_index.db'
 IMG_DB = 'img_data.db' # The database to create
 DB_FILE = os.path.join('..', 'data.db')
 #
diff --git a/backend/tol_data/enwiki/gen_pageview_data.py b/backend/tol_data/enwiki/gen_pageview_data.py
index ce3b674..8aee1cc 100755
--- a/backend/tol_data/enwiki/gen_pageview_data.py
+++ b/backend/tol_data/enwiki/gen_pageview_data.py
@@ -12,7 +12,7 @@ from collections import defaultdict
 import bz2, sqlite3
 
 PAGEVIEW_FILES = glob.glob('./pageviews/pageviews-*-user.bz2')
-DUMP_INDEX_DB = 'dumpIndex.db'
+DUMP_INDEX_DB = 'dump_index.db'
 DB_FILE = 'pageview_data.db'
 
 def genData(pageviewFiles: list[str], dumpIndexDb: str, dbFile: str) -> None:
@@ -42,6 +42,7 @@ def genData(pageviewFiles: list[str], dumpIndexDb: str, dbFile: str) -> None:
 			if namespaceRegex.match(title) is not None:
 				continue
 			# Update map
+			title = title.replace('_', ' ')
 			titleToViews[title] += viewCount
 	print(f'Found {len(titleToViews)} titles')
 #
diff --git a/backend/tol_data/enwiki/lookup_page.py b/backend/tol_data/enwiki/lookup_page.py
index 8ef1229..f744818 100755
--- a/backend/tol_data/enwiki/lookup_page.py
+++ b/backend/tol_data/enwiki/lookup_page.py
@@ -10,7 +10,7 @@ import bz2
 import sqlite3
 
 DUMP_FILE = 'enwiki-20220501-pages-articles-multistream.xml.bz2'
-INDEX_DB = 'dumpIndex.db'
+INDEX_DB = 'dump_index.db'
 
 def lookupPage(dumpFile: str, indexDb: str, pageTitle: str) -> None:
 	print('Looking up offset in index db')
diff --git a/backend/tol_data/gen_imgs.py b/backend/tol_data/gen_imgs.py
index 6d54e4d..0ba75ec 100755
--- a/backend/tol_data/gen_imgs.py
+++ b/backend/tol_data/gen_imgs.py
@@ -115,7 +115,7 @@ def processPickedImgs(
 def processImgs(
 		imgListFile: str, eolImgDir: str, eolImgDb: str, enwikiImgDb: str,
 		nodesDone: set[str], imgsDone: set[ImgId], outDir: str, dbCur: sqlite3.Cursor) -> bool:
-	""" Converts EOL and enwiki images, and updates db, returning False upon interrupted or failure """
+	""" Converts EOL and enwiki images, and updates db, returning False upon interruption or failure """
 	eolCon = sqlite3.connect(eolImgDb)
 	eolCur = eolCon.cursor()
 	enwikiCon = sqlite3.connect(enwikiImgDb)
diff --git a/backend/tol_data/gen_mapping_data.py b/backend/tol_data/gen_mapping_data.py
index 95e930b..4373d1d 100755
--- a/backend/tol_data/gen_mapping_data.py
+++ b/backend/tol_data/gen_mapping_data.py
@@ -19,7 +19,7 @@ import gzip, csv, sqlite3
 TAXONOMY_FILE = os.path.join('otol', 'taxonomy.tsv')
 EOL_IDS_FILE = os.path.join('eol', 'provider_ids.csv.gz')
 WIKIDATA_DB = os.path.join('wikidata', 'taxon_srcs.db')
-ENWIKI_DUMP_INDEX_DB = os.path.join('enwiki', 'dumpIndex.db')
+ENWIKI_DUMP_INDEX_DB = os.path.join('enwiki', 'dump_index.db')
 PICKED_MAPPINGS = {
 	'eol': ['picked_eol_ids.txt'],
 	'enwiki': ['picked_wiki_ids.txt', 'picked_wiki_ids_rough.txt']
diff --git a/backend/tol_data/picked_imgs/README.md b/backend/tol_data/picked_imgs/README.md
index 1edd951..71c13c0 100644
--- a/backend/tol_data/picked_imgs/README.md
+++ b/backend/tol_data/picked_imgs/README.md
@@ -4,7 +4,7 @@ on top of those from EOL and Wikipedia.
 Possible Files
 ==============
 - (Image files)
-- img_data.txt <br>
+- `img_data.txt` <br>
   Contains lines with the format `filename|url|license|artist|credit`.
   The filename should consist of a node name, with an image extension.
   Other fields correspond to those in the `images` table (see ../README.md).
diff --git a/backend/tol_data/wikidata/README.md b/backend/tol_data/wikidata/README.md
index 7b3105e..806b315 100644
--- a/backend/tol_data/wikidata/README.md
+++ b/backend/tol_data/wikidata/README.md
@@ -1,4 +1,4 @@
-This directory holds files obtained via [Wikidata](https://www.wikidata.org/).
+This directory holds files obtained/derived from [Wikidata](https://www.wikidata.org/).
 
 # Downloaded Files
 - `latest-all.json.bz2` <br>
@@ -10,7 +10,7 @@ This directory holds files obtained/derived from [Wikidata](https://www.wikidata.org/).
   Used to generate a database holding taxon information from the dump.
 - `offsets.dat` <br>
   Holds bzip2 block offsets for the dump. Generated and used by
-  genTaxonSrcData.py for parallel processing of the dump.
+  gen_taxon_src_data.py for parallel processing of the dump.
 - `taxon_srcs.db` <br>
   Generated by `gen_taxon_src_data.py`. <br>
   Tables: <br>
diff --git a/backend/tol_data/wikidata/gen_taxon_src_data.py b/backend/tol_data/wikidata/gen_taxon_src_data.py
index 50ed917..1bddb6e 100755
--- a/backend/tol_data/wikidata/gen_taxon_src_data.py
+++ b/backend/tol_data/wikidata/gen_taxon_src_data.py
@@ -50,7 +50,7 @@ IUCN_STATUS_IDS = {
 	'Q237350': 'extinct species', 'Q3245245': 'data deficient'
 }
 # For filtering lines before parsing JSON
-LINE_REGEX = re.compile(('"id":(?:"' + '"|"'.join([s for s in TAXON_IDS + TAXON_ALT_IDS]) + '")\D').encode())
+LINE_REGEX = re.compile(('"id":(?:"' + '"|"'.join([s for s in TAXON_IDS + TAXON_ALT_IDS]) + '")').encode())
 
 def genData(wikidataFile: str, offsetsFile: str, dbFile: str, nProcs: int) -> None:
 	""" Reads the dump and writes source/iucn info to db """
@@ -92,8 +92,8 @@ def genData(wikidataFile: str, offsetsFile: str, dbFile: str, nProcs: int) -> None:
 	with multiprocessing.Pool(processes=nProcs, maxtasksperchild=1) as pool:
 		for outFilename in pool.map(
 				readDumpChunkOneParam,
-				((i, wikidataFile, offsetsFile, chunkIdxs[i], chunkIdxs[i+1],
-					os.path.join(tempDirName, f'{i}.pickle')) for i in range(nProcs))):
+				[(i, wikidataFile, offsetsFile, chunkIdxs[i], chunkIdxs[i+1],
+					os.path.join(tempDirName, f'{i}.pickle')) for i in range(nProcs)]):
 			# Get map data from subprocess output file
 			with open(outFilename, 'rb') as file:
 				maps = pickle.load(file)
```
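
The `Format:` lines touched up in the first hunk double as the schema documentation for `data.db`. As a rough illustration, the sketch below creates both tables in SQLite; the schemas are quoted from `backend/tol_data/README.md`, while the sample rows are invented:

```python
import sqlite3

# Schemas quoted from backend/tol_data/README.md; the rows are invented examples
dbCon = sqlite3.connect(':memory:')
dbCur = dbCon.cursor()
dbCur.execute('CREATE TABLE nodes (name TEXT PRIMARY KEY, id TEXT UNIQUE, tips INT)')
dbCur.execute('CREATE TABLE edges (parent TEXT, child TEXT, p_support INT, PRIMARY KEY (parent, child))')
dbCur.execute('INSERT INTO nodes VALUES (?, ?, ?)', ('cellular organisms', 'ott93302', 1))
dbCur.execute('INSERT INTO nodes VALUES (?, ?, ?)', ('Eukaryota', 'ott304358', 1))
dbCur.execute('INSERT INTO edges VALUES (?, ?, ?)', ('cellular organisms', 'Eukaryota', 1))
dbCon.commit()
```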
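
The `dumpIndex.db` to `dump_index.db` rename runs through every script that uses the dump index. Per the README hunks above, the `offsets` table maps a page title to the byte range of the bzip2 stream holding that page, which is what lets `lookup_page.py` print a page without decompressing the whole multistream dump. Below is a minimal sketch of that lookup, assuming only the documented `offsets` schema; the actual `lookup_page.py` logic may differ:

```python
#!/usr/bin/python3
import bz2
import sqlite3
from typing import Optional

DUMP_FILE = 'enwiki-20220501-pages-articles-multistream.xml.bz2'
INDEX_DB = 'dump_index.db'

def readStreamWithPage(pageTitle: str) -> Optional[str]:
	""" Returns the XML of the bzip2 stream containing the given page, or None """
	dbCon = sqlite3.connect(INDEX_DB)
	row = dbCon.execute(
		'SELECT offset, next_offset FROM offsets WHERE title = ?', (pageTitle,)).fetchone()
	dbCon.close()
	if row is None:
		return None
	offset, nextOffset = row
	with open(DUMP_FILE, 'rb') as file:
		# Each offset marks the start of an independent bzip2 stream, so only
		# the bytes up to next_offset need decompressing (read to EOF if absent)
		file.seek(offset)
		data = file.read() if nextOffset is None else file.read(nextOffset - offset)
	return bz2.decompress(data).decode('utf-8')
```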
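
The one behavioural change in `gen_pageview_data.py` is the added `title = title.replace('_', ' ')`. Pageview dumps record titles in URL form, with underscores, while the titles in `dump_index.db` use spaces, so normalising before accumulating keeps both sources under one key. For example, with made-up counts:

```python
from collections import defaultdict

titleToViews: defaultdict[str, int] = defaultdict(int)
for title, viewCount in [('Tree_of_life', 120), ('Tree of life', 40)]:  # Made-up counts
	title = title.replace('_', ' ')
	titleToViews[title] += viewCount
print(titleToViews['Tree of life'])  # 160, accumulated under one normalised key
```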
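
For the pipe-separated `img_data.txt` format documented in the `picked_imgs` README, a hypothetical reader might look like this; none of this code is from the repo, and it assumes one well-formed record per line:

```python
import os

# img_data.txt lines have the form filename|url|license|artist|credit,
# where the filename is a node name plus an image extension
with open('img_data.txt') as file:
	for line in file:
		filename, url, license, artist, credit = line.rstrip('\n').split('|')
		nodeName = os.path.splitext(filename)[0]
		print(f'{nodeName}: {url} ({license})')
```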
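
Finally, in `gen_taxon_src_data.py`, the generator expression handed to `pool.map` becomes a list literal. The two behave identically here, since `Pool.map` (unlike `imap`) materialises its whole iterable before dispatching work; the list form just makes that explicit. A toy version of the same one-tuple-per-task pattern, with a hypothetical `square` worker:

```python
import multiprocessing

def square(params: tuple[int, int]) -> int:
	i, scale = params  # pool.map passes a single argument, hence one tuple per task
	return i * i * scale

if __name__ == '__main__':
	nProcs = 4
	with multiprocessing.Pool(processes=nProcs) as pool:
		# A list literal and a generator expression behave the same here:
		# map() consumes the entire iterable before scheduling tasks
		print(pool.map(square, [(i, 10) for i in range(nProcs)]))  # [0, 10, 40, 90]
```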
