From 5fe71ea7b9d9a5d2dc6e8e5ce5b9193629eed74d Mon Sep 17 00:00:00 2001 From: Terry Truong Date: Mon, 11 Jul 2022 01:54:08 +1000 Subject: Make backend dev server script serve the image files Previously, image files in backend/data/img were moved to, or symlinked from, public/. This needed to be changed before each build, otherwise vite would end up copying gigabytes of images. --- backend/tolData/README.md | 152 +++++++++++ backend/tolData/addPickedNames.py | 57 ++++ backend/tolData/dbpedia/README.md | 29 ++ backend/tolData/dbpedia/genDescData.py | 130 +++++++++ backend/tolData/enwiki/README.md | 52 ++++ backend/tolData/enwiki/downloadImgLicenseInfo.py | 150 +++++++++++ backend/tolData/enwiki/downloadImgs.py | 91 +++++++ backend/tolData/enwiki/genDescData.py | 127 +++++++++ backend/tolData/enwiki/genDumpIndexDb.py | 58 ++++ backend/tolData/enwiki/genImgData.py | 190 +++++++++++++ backend/tolData/enwiki/lookupPage.py | 68 +++++ backend/tolData/eol/README.md | 26 ++ backend/tolData/eol/downloadImgs.py | 147 ++++++++++ backend/tolData/eol/genImagesListDb.sh | 12 + backend/tolData/eol/reviewImgs.py | 205 ++++++++++++++ backend/tolData/genDbpData.py | 247 +++++++++++++++++ backend/tolData/genEnwikiDescData.py | 102 +++++++ backend/tolData/genEnwikiNameData.py | 76 ++++++ backend/tolData/genEolNameData.py | 184 +++++++++++++ backend/tolData/genImgs.py | 191 +++++++++++++ backend/tolData/genLinkedImgs.py | 125 +++++++++ backend/tolData/genOtolData.py | 250 +++++++++++++++++ backend/tolData/genReducedTrees.py | 329 +++++++++++++++++++++++ backend/tolData/otol/README.md | 10 + backend/tolData/pickedImgs/README.md | 10 + backend/tolData/reviewImgsToGen.py | 225 ++++++++++++++++ 26 files changed, 3243 insertions(+) create mode 100644 backend/tolData/README.md create mode 100755 backend/tolData/addPickedNames.py create mode 100644 backend/tolData/dbpedia/README.md create mode 100755 backend/tolData/dbpedia/genDescData.py create mode 100644 backend/tolData/enwiki/README.md create mode 100755 backend/tolData/enwiki/downloadImgLicenseInfo.py create mode 100755 backend/tolData/enwiki/downloadImgs.py create mode 100755 backend/tolData/enwiki/genDescData.py create mode 100755 backend/tolData/enwiki/genDumpIndexDb.py create mode 100755 backend/tolData/enwiki/genImgData.py create mode 100755 backend/tolData/enwiki/lookupPage.py create mode 100644 backend/tolData/eol/README.md create mode 100755 backend/tolData/eol/downloadImgs.py create mode 100755 backend/tolData/eol/genImagesListDb.sh create mode 100755 backend/tolData/eol/reviewImgs.py create mode 100755 backend/tolData/genDbpData.py create mode 100755 backend/tolData/genEnwikiDescData.py create mode 100755 backend/tolData/genEnwikiNameData.py create mode 100755 backend/tolData/genEolNameData.py create mode 100755 backend/tolData/genImgs.py create mode 100755 backend/tolData/genLinkedImgs.py create mode 100755 backend/tolData/genOtolData.py create mode 100755 backend/tolData/genReducedTrees.py create mode 100644 backend/tolData/otol/README.md create mode 100644 backend/tolData/pickedImgs/README.md create mode 100755 backend/tolData/reviewImgsToGen.py (limited to 'backend/tolData') diff --git a/backend/tolData/README.md b/backend/tolData/README.md new file mode 100644 index 0000000..ba64114 --- /dev/null +++ b/backend/tolData/README.md @@ -0,0 +1,152 @@ +This directory holds files used to generate data.db, which contains tree-of-life data. + +# Tables +## Tree Structure data +- `nodes`
  Format: `name TEXT PRIMARY KEY, id TEXT UNIQUE, tips INT`<br>
  Represents a tree-of-life node. `tips` is the number of descendants that have no children (ie: leaf/'tip' nodes).
- `edges`<br>
+ Format: `parent TEXT, child TEXT, p_support INT, PRIMARY KEY (parent, child)`
  `p_support` is 1 if the edge has 'phylogenetic support', and 0 otherwise.
## Node name data
- `eol_ids`<br>
+ Format: `id INT PRIMARY KEY, name TEXT`
+ Associates an EOL ID with a node's name. +- `names`
+ Format: `name TEXT, alt_name TEXT, pref_alt INT, src TEXT, PRIMARY KEY(name, alt_name)`
+ Associates a node with alternative names. + `pref_alt` is 1 if the alt-name is the most 'preferred' one. + `src` indicates the dataset the alt-name was obtained from (can be 'eol', 'enwiki', or 'picked'). +## Node description data +- `wiki_ids`
+ Format: `name TEXT PRIMARY KEY, id INT, redirected INT`
+ Associates a node with a wikipedia page ID. + `redirected` is 1 if the node was associated with a different page that redirected to this one. +- `descs`
+ Format: `wiki_id INT PRIMARY KEY, desc TEXT, from_dbp INT`
+ Associates a wikipedia page ID with a short-description. + `from_dbp` is 1 if the description was obtained from DBpedia, and 0 otherwise. +## Node image data +- `node_imgs`
+ Format: `name TEXT PRIMARY KEY, img_id INT, src TEXT`
+ Associates a node with an image. +- `images`
+ Format: `id INT, src TEXT, url TEXT, license TEXT, artist TEXT, credit TEXT, PRIMARY KEY (id, src)`
+ Represents an image, identified by a source ('eol', 'enwiki', or 'picked'), and a source-specific ID. +- `linked_imgs`
+ Format: `name TEXT PRIMARY KEY, otol_ids TEXT`
  Associates a node with an image from another node.
  `otol_ids` can be a single otol ID or, for a compound node, two comma-separated
  values, each of which is an otol ID or an empty string (eg: '123' or '123,' or '123,456').
## Reduced tree data
- `nodes_t`, `nodes_i`, `nodes_p`<br>
  These are like `nodes`, but describe the nodes for various reduced trees.
- `edges_t`, `edges_i`, `edges_p`<br>
  Like `edges` but for reduced trees.
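As a quick illustration of how these tables connect, here's a sketch of walking one level
of the tree (it assumes a generated data.db, and the node name used is hypothetical):

```python
import sqlite3

dbCon = sqlite3.connect("data.db")
dbCur = dbCon.cursor()
# Get the children of a node, along with each child's tip count
query = "SELECT child, tips FROM edges INNER JOIN nodes ON edges.child = nodes.name WHERE parent = ?"
for (child, tips) in dbCur.execute(query, ("carnivora",)):
	# Look up the child's preferred alt-name, if it has one
	row = dbCur.execute(
		"SELECT alt_name FROM names WHERE name = ? AND pref_alt = 1", (child,)).fetchone()
	print(child, tips, row[0] if row != None else None)
dbCon.close()
```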
# Generating the Database

For the most part, these steps should be done in order.

As a warning, the whole process takes a lot of time and file space. The tree will probably
have about 2.5 million nodes. Downloading the images takes several days, and occupies over
200 GB. And if you want good data, you'll need to do some manual review, which can take weeks.

## Environment
The scripts are written in python and bash.
Some of the python scripts require third-party packages:
- jsonpickle: For encoding class objects as JSON.
- requests: For downloading data.
- PIL: For image processing.
- tkinter: For providing a basic GUI to review images.
- mwxml, mwparserfromhell: For parsing Wikipedia dumps.

## Generate tree structure data
1. Obtain files in otol/, as specified in its README.
2. Run genOtolData.py, which creates data.db, and adds the `nodes` and `edges` tables,
   using data in otol/. It also uses these files, if they exist:
    - pickedOtolNames.txt: Has lines of the form `name1|otolId1`. Some nodes in the
      tree may have the same name (eg: Pholidota can refer to pangolins or orchids).
      Normally, such nodes will get the names 'name1', 'name1 [2]', 'name1 [3]', etc.
      This file can be used to manually specify which node should be named 'name1'.

## Generate node name data
1. Obtain 'name data files' in eol/, as specified in its README.
2. Run genEolNameData.py, which adds the `names` and `eol_ids` tables, using data in
   eol/ and the `nodes` table. It also uses these files, if they exist:
    - pickedEolIds.txt: Has lines of the form `nodeName1|eolId1` or `nodeName1|`.
      Specifies node names that should have a particular EOL ID, or no ID.
      Quite a few taxa have ambiguous names, and may need manual correction.
      For example, Viola may resolve to a taxon of butterflies or of plants.
    - pickedEolAltsToSkip.txt: Has lines of the form `nodeName1|altName1`.
      Specifies that a node's alt-name set should exclude altName1.

## Generate node description data
### Get data from DBpedia
1. Obtain files in dbpedia/, as specified in its README.
2. Run genDbpData.py, which adds the `wiki_ids` and `descs` tables, using data in
   dbpedia/ and the `nodes` table. It also uses these files, if they exist:
    - pickedEnwikiNamesToSkip.txt: Each line holds the name of a node for which
      no description should be obtained. Many node names have a same-name
      wikipedia page that describes something different (eg: Osiris).
    - pickedDbpLabels.txt: Has lines of the form `nodeName1|label1`.
      Specifies node names that should have a particular associated page label.
### Get data from Wikipedia
1. Obtain 'description database files' in enwiki/, as specified in its README.
2. Run genEnwikiDescData.py, which adds to the `wiki_ids` and `descs` tables,
   using data in enwiki/ and the `nodes` table.
   It also uses these files, if they exist:
    - pickedEnwikiNamesToSkip.txt: Same as with genDbpData.py.
    - pickedEnwikiLabels.txt: Similar to pickedDbpLabels.txt.

## Generate node image data
### Get images from EOL
1. Obtain 'image metadata files' in eol/, as specified in its README.
2. In eol/, run downloadImgs.py, which downloads images (possibly multiple per node),
   into eol/imgsForReview, using data in eol/, as well as the `eol_ids` table.
3. In eol/, run reviewImgs.py, which interactively displays the downloaded images for
   each node, providing the choice of which to use, moving them to eol/imgs/.
+ Uses `names` and `eol_ids` to display extra info. +### Get images from Wikipedia +1. In enwiki/, run genImgData.py, which looks for wikipedia image names for each node, + using the `wiki_ids` table, and stores them in a database. +2. In enwiki/, run downloadImgLicenseInfo.py, which downloads licensing information for + those images, using wikipedia's online API. +3. In enwiki/, run downloadImgs.py, which downloads 'permissively-licensed' + images into enwiki/imgs/. +### Merge the image sets +1. Run reviewImgsToGen.py, which displays images from eol/imgs/ and enwiki/imgs/, + and enables choosing, for each node, which image should be used, if any, + and outputs choice information into imgList.txt. Uses the `nodes`, + `eol_ids`, and `wiki_ids` tables (as well as `names` to display extra info). +2. Run genImgs.py, which creates cropped/resized images in img/, from files listed in + imgList.txt and located in eol/ and enwiki/, and creates the `node_imgs` and + `images` tables. If pickedImgs/ is present, images within it are also used.
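   The kind of transform genImgs.py applies is roughly as below (a simplified sketch,
   not the actual implementation; the 200x200 output size and centre-crop strategy are assumptions):

```python
from PIL import Image

def genCroppedImg(inFile, outFile, sz=200):
	# Centre-crop to a square, then downscale and save as JPEG
	img = Image.open(inFile).convert("RGB")
	w, h = img.size
	side = min(w, h)
	left, top = (w - side) // 2, (h - side) // 2
	img = img.crop((left, top, left + side, top + side))
	img.resize((sz, sz)).save(outFile, "JPEG")
```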
   The outputs might need to be manually created/adjusted:
    - An input image might have no output produced, possibly due to
      data incompatibilities, memory limits, etc. A few input image files
      might actually be html files, containing a 'file not found' page.
    - An input x.gif might produce x-1.jpg, x-2.jpg, etc., instead of x.jpg.
    - An input image might produce output with unexpected dimensions.
      This seems to happen when the image is very large, and triggers a
      decompression bomb warning.
   The result might have as many as 150k images, with about 2/3 of them
   being from wikipedia.
### Add more image associations
1. Run genLinkedImgs.py, which tries to associate nodes without images with
   images of their children. Adds the `linked_imgs` table, and uses the
   `nodes`, `edges`, and `node_imgs` tables.

## Do some post-processing
1. Run genEnwikiNameData.py, which adds more entries to the `names` table,
   using data in enwiki/, and the `names` and `wiki_ids` tables.
2. Optionally run addPickedNames.py, which allows adding manually-selected name data to
   the `names` table, as specified in pickedNames.txt.
    - pickedNames.txt: Has lines of the form `nodeName1|altName1|prefAlt1`.
      These correspond to entries in the `names` table. `prefAlt` should be 1 or 0.
      A line like `name1|name1|1` causes a node to have no preferred alt-name.
3. Run genReducedTrees.py, which generates multiple reduced versions of the tree,
   adding the `nodes_*` and `edges_*` tables, using `nodes` and `names`. Reads from
   pickedNodes.txt, which lists names of nodes that must be included (1 per line).
   The original tree isn't used for web-queries, as some nodes would have over
   10k children, which can take a while to render (took over a minute in testing).
diff --git a/backend/tolData/addPickedNames.py b/backend/tolData/addPickedNames.py
new file mode 100755
index 0000000..d56a0cb
--- /dev/null
+++ b/backend/tolData/addPickedNames.py
@@ -0,0 +1,57 @@
#!/usr/bin/python3

import sys
import sqlite3

usageInfo = f"""
Usage: {sys.argv[0]}

Reads alt-name data from a file, and adds it to the database's 'names' table.
"""
if len(sys.argv) > 1:
	print(usageInfo, file=sys.stderr)
	sys.exit(1)

dbFile = "data.db"
pickedNamesFile = "pickedNames.txt"

print("Opening database")
dbCon = sqlite3.connect(dbFile)
dbCur = dbCon.cursor()

print("Iterating through picked-names file")
with open(pickedNamesFile) as file:
	for line in file:
		# Get record data
		nodeName, altName, prefAlt = line.lower().rstrip().split("|")
		prefAlt = int(prefAlt)
		# Check whether there exists a node with the name
		row = dbCur.execute("SELECT name from nodes where name = ?", (nodeName,)).fetchone()
		if row == None:
			print(f"ERROR: No node with name \"{nodeName}\" exists")
			break
		# Remove any existing preferred-alt status
		if prefAlt == 1:
			query = "SELECT name, alt_name FROM names WHERE name = ? AND pref_alt = 1"
			row = dbCur.execute(query, (nodeName,)).fetchone()
			if row != None and row[1] != altName:
				print(f"Removing pref-alt status from alt-name {row[1]} for {nodeName}")
				dbCur.execute("UPDATE names SET pref_alt = 0 WHERE name = ? AND alt_name = ?", row)
		# Check for an existing record
		if nodeName == altName:
			continue
		query = "SELECT name, alt_name, pref_alt FROM names WHERE name = ? AND alt_name = ?"
+ row = dbCur.execute(query, (nodeName, altName)).fetchone() + if row == None: + print(f"Adding record for alt-name {altName} for {nodeName}") + dbCur.execute("INSERT INTO names VALUES (?, ?, ?, 'picked')", (nodeName, altName, prefAlt)) + else: + # Update existing record + if row[2] != prefAlt: + print(f"Updating record for alt-name {altName} for {nodeName}") + dbCur.execute("UPDATE names SET pref_alt = ?, src = 'picked' WHERE name = ? AND alt_name = ?", + (prefAlt, nodeName, altName)) + +print("Closing database") +dbCon.commit() +dbCon.close() diff --git a/backend/tolData/dbpedia/README.md b/backend/tolData/dbpedia/README.md new file mode 100644 index 0000000..8a08f20 --- /dev/null +++ b/backend/tolData/dbpedia/README.md @@ -0,0 +1,29 @@ +This directory holds files obtained from/using [Dbpedia](https://www.dbpedia.org). + +# Downloaded Files +- `labels_lang=en.ttl.bz2`
+ Obtained via https://databus.dbpedia.org/dbpedia/collections/latest-core. + Downloaded from . +- `page_lang=en_ids.ttl.bz2`
+ Downloaded from +- `redirects_lang=en_transitive.ttl.bz2`
+ Downloaded from . +- `disambiguations_lang=en.ttl.bz2`
+ Downloaded from . +- `instance-types_lang=en_specific.ttl.bz2`
+ Downloaded from . +- `short-abstracts_lang=en.ttl.bz2`
+ Downloaded from . + +# Other Files +- genDescData.py
+ Used to generate a database representing data from the ttl files. +- descData.db
+ Generated by genDescData.py.
+ Tables:
+ - `labels`: `iri TEXT PRIMARY KEY, label TEXT ` + - `ids`: `iri TEXT PRIMARY KEY, id INT` + - `redirects`: `iri TEXT PRIMARY KEY, target TEXT` + - `disambiguations`: `iri TEXT PRIMARY KEY` + - `types`: `iri TEXT, type TEXT` + - `abstracts`: `iri TEXT PRIMARY KEY, abstract TEXT` diff --git a/backend/tolData/dbpedia/genDescData.py b/backend/tolData/dbpedia/genDescData.py new file mode 100755 index 0000000..d9e8a80 --- /dev/null +++ b/backend/tolData/dbpedia/genDescData.py @@ -0,0 +1,130 @@ +#!/usr/bin/python3 + +import sys, re +import bz2, sqlite3 + +usageInfo = f""" +Usage: {sys.argv[0]} + +Adds DBpedia labels/types/abstracts/etc data into a database. +""" +if len(sys.argv) > 1: + print(usageInfo, file=sys.stderr) + sys.exit(1) + +labelsFile = "labels_lang=en.ttl.bz2" # Had about 16e6 entries +idsFile = "page_lang=en_ids.ttl.bz2" +redirectsFile = "redirects_lang=en_transitive.ttl.bz2" +disambigFile = "disambiguations_lang=en.ttl.bz2" +typesFile = "instance-types_lang=en_specific.ttl.bz2" +abstractsFile = "short-abstracts_lang=en.ttl.bz2" +dbFile = "descData.db" +# In testing, this script took a few hours to run, and generated about 10GB + +print("Creating database") +dbCon = sqlite3.connect(dbFile) +dbCur = dbCon.cursor() + +print("Reading/storing label data") +dbCur.execute("CREATE TABLE labels (iri TEXT PRIMARY KEY, label TEXT)") +dbCur.execute("CREATE INDEX labels_idx ON labels(label)") +dbCur.execute("CREATE INDEX labels_idx_nc ON labels(label COLLATE NOCASE)") +labelLineRegex = re.compile(r'<([^>]+)> <[^>]+> "((?:[^"]|\\")+)"@en \.\n') +lineNum = 0 +with bz2.open(labelsFile, mode='rt') as file: + for line in file: + lineNum += 1 + if lineNum % 1e5 == 0: + print(f"At line {lineNum}") + # + match = labelLineRegex.fullmatch(line) + if match == None: + raise Exception(f"ERROR: Line {lineNum} has unexpected format") + dbCur.execute("INSERT INTO labels VALUES (?, ?)", (match.group(1), match.group(2))) + +print("Reading/storing wiki page ids") +dbCur.execute("CREATE TABLE ids (iri TEXT PRIMARY KEY, id INT)") +idLineRegex = re.compile(r'<([^>]+)> <[^>]+> "(\d+)".*\n') +lineNum = 0 +with bz2.open(idsFile, mode='rt') as file: + for line in file: + lineNum += 1 + if lineNum % 1e5 == 0: + print(f"At line {lineNum}") + # + match = idLineRegex.fullmatch(line) + if match == None: + raise Exception(f"ERROR: Line {lineNum} has unexpected format") + try: + dbCur.execute("INSERT INTO ids VALUES (?, ?)", (match.group(1), int(match.group(2)))) + except sqlite3.IntegrityError as e: + # Accounts for certain lines that have the same IRI + print(f"WARNING: Failed to add entry with IRI \"{match.group(1)}\": {e}") + +print("Reading/storing redirection data") +dbCur.execute("CREATE TABLE redirects (iri TEXT PRIMARY KEY, target TEXT)") +redirLineRegex = re.compile(r'<([^>]+)> <[^>]+> <([^>]+)> \.\n') +lineNum = 0 +with bz2.open(redirectsFile, mode='rt') as file: + for line in file: + lineNum += 1 + if lineNum % 1e5 == 0: + print(f"At line {lineNum}") + # + match = redirLineRegex.fullmatch(line) + if match == None: + raise Exception(f"ERROR: Line {lineNum} has unexpected format") + dbCur.execute("INSERT INTO redirects VALUES (?, ?)", (match.group(1), match.group(2))) + +print("Reading/storing diambiguation-page data") +dbCur.execute("CREATE TABLE disambiguations (iri TEXT PRIMARY KEY)") +disambigLineRegex = redirLineRegex +lineNum = 0 +with bz2.open(disambigFile, mode='rt') as file: + for line in file: + lineNum += 1 + if lineNum % 1e5 == 0: + print(f"At line {lineNum}") + # + match = 
disambigLineRegex.fullmatch(line) + if match == None: + raise Exception(f"ERROR: Line {lineNum} has unexpected format") + dbCur.execute("INSERT OR IGNORE INTO disambiguations VALUES (?)", (match.group(1),)) + +print("Reading/storing instance-type data") +dbCur.execute("CREATE TABLE types (iri TEXT, type TEXT)") +dbCur.execute("CREATE INDEX types_iri_idx ON types(iri)") +typeLineRegex = redirLineRegex +lineNum = 0 +with bz2.open(typesFile, mode='rt') as file: + for line in file: + lineNum += 1 + if lineNum % 1e5 == 0: + print(f"At line {lineNum}") + # + match = typeLineRegex.fullmatch(line) + if match == None: + raise Exception(f"ERROR: Line {lineNum} has unexpected format") + dbCur.execute("INSERT INTO types VALUES (?, ?)", (match.group(1), match.group(2))) + +print("Reading/storing abstracts") +dbCur.execute("CREATE TABLE abstracts (iri TEXT PRIMARY KEY, abstract TEXT)") +descLineRegex = labelLineRegex +lineNum = 0 +with bz2.open(abstractsFile, mode='rt') as file: + for line in file: + lineNum += 1 + if lineNum % 1e5 == 0: + print(f"At line {lineNum}") + # + if line[0] == "#": + continue + match = descLineRegex.fullmatch(line) + if match == None: + raise Exception(f"ERROR: Line {lineNum} has unexpected format") + dbCur.execute("INSERT INTO abstracts VALUES (?, ?)", + (match.group(1), match.group(2).replace(r'\"', '"'))) + +print("Closing database") +dbCon.commit() +dbCon.close() diff --git a/backend/tolData/enwiki/README.md b/backend/tolData/enwiki/README.md new file mode 100644 index 0000000..90d16c7 --- /dev/null +++ b/backend/tolData/enwiki/README.md @@ -0,0 +1,52 @@ +This directory holds files obtained from/using [English Wikipedia](https://en.wikipedia.org/wiki/Main_Page). + +# Downloaded Files +- enwiki-20220501-pages-articles-multistream.xml.bz2
+ Obtained via (site suggests downloading from a mirror). + Contains text content and metadata for pages in enwiki. + Some file content and format information was available from + . +- enwiki-20220501-pages-articles-multistream-index.txt.bz2
  Obtained like above. Holds lines of the form offset1:pageId1:title1,
  providing, for each page, an offset into the dump file of a chunk of
  100 pages that includes it.
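  For example, a single chunk can be decompressed without reading the rest of the dump
  (a rough sketch; the offsets are made up, and would come from the index):

```python
import bz2

def readChunk(dumpFile, offset, nextOffset):
	# Read one compressed chunk (of about 100 pages), and decompress it
	# (nextOffset of -1 means 'last chunk', matching the convention used in dumpIndex.db)
	with open(dumpFile, "rb") as file:
		file.seek(offset)
		data = file.read() if nextOffset == -1 else file.read(nextOffset - offset)
	return bz2.decompress(data).decode("utf-8") # An XML fragment holding the chunk's <page> elements

xmlChunk = readChunk("enwiki-20220501-pages-articles-multistream.xml.bz2", 616, 632461) # made-up offsets
```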
# Generated Dump-Index Files
- genDumpIndexDb.py<br>
  Creates an sqlite-database version of the enwiki-dump index file.
- dumpIndex.db<br>
+ Generated by genDumpIndexDb.py.
+ Tables:
+ - `offsets`: `title TEXT PRIMARY KEY, id INT UNIQUE, offset INT, next_offset INT` + +# Description Database Files +- genDescData.py
+ Reads through pages in the dump file, and adds short-description info to a database. +- descData.db
+ Generated by genDescData.py.
+ Tables:
+ - `pages`: `id INT PRIMARY KEY, title TEXT UNIQUE` + - `redirects`: `id INT PRIMARY KEY, target TEXT` + - `descs`: `id INT PRIMARY KEY, desc TEXT` + +# Image Database Files +- genImgData.py
+ Used to find infobox image names for page IDs, storing them into a database. +- downloadImgLicenseInfo.py
+ Used to download licensing metadata for image names, via wikipedia's online API, storing them into a database. +- imgData.db
  Used to hold metadata about infobox images for a set of page IDs.
  Generated using genImgData.py and downloadImgLicenseInfo.py.<br>
+ Tables:
- `page_imgs`: `page_id INT PRIMARY KEY, img_name TEXT`<br>
+ `img_name` may be null, which means 'none found', and is used to avoid re-processing page-ids. + - `imgs`: `name TEXT PRIMARY KEY, license TEXT, artist TEXT, credit TEXT, restrictions TEXT, url TEXT`
+ Might lack some matches for `img_name` in `page_imgs`, due to licensing info unavailability. +- downloadImgs.py
+ Used to download image files into imgs/. + +# Other Files +- lookupPage.py
+ Running `lookupPage.py title1` looks in the dump for a page with a given title, + and prints the contents to stdout. Uses dumpIndex.db. + diff --git a/backend/tolData/enwiki/downloadImgLicenseInfo.py b/backend/tolData/enwiki/downloadImgLicenseInfo.py new file mode 100755 index 0000000..399922e --- /dev/null +++ b/backend/tolData/enwiki/downloadImgLicenseInfo.py @@ -0,0 +1,150 @@ +#!/usr/bin/python3 + +import sys, re +import sqlite3, urllib.parse, html +import requests +import time, signal + +usageInfo = f""" +Usage: {sys.argv[0]} + +Reads image names from a database, and uses enwiki's online API to obtain +licensing information for them, adding the info to the database. + +SIGINT causes the program to finish an ongoing download and exit. +The program can be re-run to continue downloading, and looks +at already-processed names to decide what to skip. +""" +if len(sys.argv) > 1: + print(usageInfo, file=sys.stderr) + sys.exit(1) + +imgDb = "imgData.db" +apiUrl = "https://en.wikipedia.org/w/api.php" +userAgent = "terryt.dev (terry06890@gmail.com)" +batchSz = 50 # Max 50 +tagRegex = re.compile(r"<[^<]+>") +whitespaceRegex = re.compile(r"\s+") + +print("Opening database") +dbCon = sqlite3.connect(imgDb) +dbCur = dbCon.cursor() +dbCur2 = dbCon.cursor() +print("Checking for table") +if dbCur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='imgs'").fetchone() == None: + dbCur.execute("CREATE TABLE imgs(" \ + "name TEXT PRIMARY KEY, license TEXT, artist TEXT, credit TEXT, restrictions TEXT, url TEXT)") + +print("Reading image names") +imgNames = set() +for (imgName,) in dbCur.execute("SELECT DISTINCT img_name FROM page_imgs WHERE img_name NOT NULL"): + imgNames.add(imgName) +print(f"Found {len(imgNames)}") + +print("Checking for already-processed images") +oldSz = len(imgNames) +for (imgName,) in dbCur.execute("SELECT name FROM imgs"): + imgNames.discard(imgName) +print(f"Found {oldSz - len(imgNames)}") + +# Set SIGINT handler +interrupted = False +oldHandler = None +def onSigint(sig, frame): + global interrupted + interrupted = True + signal.signal(signal.SIGINT, oldHandler) +oldHandler = signal.signal(signal.SIGINT, onSigint) + +print("Iterating through image names") +imgNames = list(imgNames) +iterNum = 0 +for i in range(0, len(imgNames), batchSz): + iterNum += 1 + if iterNum % 1 == 0: + print(f"At iteration {iterNum} (after {(iterNum - 1) * batchSz} images)") + if interrupted: + print(f"Exiting loop at iteration {iterNum}") + break + # Get batch + imgBatch = imgNames[i:i+batchSz] + imgBatch = ["File:" + x for x in imgBatch] + # Make request + headers = { + "user-agent": userAgent, + "accept-encoding": "gzip", + } + params = { + "action": "query", + "format": "json", + "prop": "imageinfo", + "iiprop": "extmetadata|url", + "maxlag": "5", + "titles": "|".join(imgBatch), + "iiextmetadatafilter": "Artist|Credit|LicenseShortName|Restrictions", + } + responseObj = None + try: + response = requests.get(apiUrl, params=params, headers=headers) + responseObj = response.json() + except Exception as e: + print(f"ERROR: Exception while downloading info: {e}") + print(f"\tImage batch: " + "|".join(imgBatch)) + continue + # Parse response-object + if "query" not in responseObj or "pages" not in responseObj["query"]: + print("WARNING: Response object for doesn't have page data") + print("\tImage batch: " + "|".join(imgBatch)) + if "error" in responseObj: + errorCode = responseObj["error"]["code"] + print(f"\tError code: {errorCode}") + if errorCode == "maxlag": + time.sleep(5) + continue + 
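	# The response is expected to look roughly like the following (an assumed,
	# abbreviated shape, based on https://www.mediawiki.org/wiki/API:Imageinfo):
	# {"query": {
	#     "normalized": [{"from": "File:some img.jpg", "to": "File:Some img.jpg"}, ...],
	#     "pages": {
	#         "123": {"title": "File:Some img.jpg", "imageinfo": [{
	#             "url": "https://upload.wikimedia.org/...",
	#             "extmetadata": {"LicenseShortName": {"value": "CC BY-SA 4.0"}, ...}}]}}}}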
	pages = responseObj["query"]["pages"]
	normalisedToInput = {}
	if "normalized" in responseObj["query"]:
		for entry in responseObj["query"]["normalized"]:
			normalisedToInput[entry["to"]] = entry["from"]
	for (_, page) in pages.items():
		# Some fields // More info at https://www.mediawiki.org/wiki/Extension:CommonsMetadata#Returned_data
			# LicenseShortName: short human-readable license name, apparently more reliable than 'License',
			# Artist: author name (might contain complex html, multiple authors, etc)
			# Credit: 'source'
				# For image-map-like images, can be quite large/complex html, crediting each sub-image
				# May be an html link, where the link text might be non-indicative
			# Restrictions: specifies non-copyright legal restrictions
		title = page["title"]
		if title in normalisedToInput:
			title = normalisedToInput[title]
		title = title[5:] # Remove 'File:'
		if title not in imgNames:
			print(f"WARNING: Got title \"{title}\" not in image-name list")
			continue
		if "imageinfo" not in page:
			print(f"WARNING: No imageinfo section for page \"{title}\"")
			continue
		metadata = page["imageinfo"][0]["extmetadata"]
		url = page["imageinfo"][0]["url"]
		license = metadata['LicenseShortName']['value'] if 'LicenseShortName' in metadata else None
		artist = metadata['Artist']['value'] if 'Artist' in metadata else None
		credit = metadata['Credit']['value'] if 'Credit' in metadata else None
		restrictions = metadata['Restrictions']['value'] if 'Restrictions' in metadata else None
		# Remove markup
		if artist != None:
			artist = tagRegex.sub(" ", artist)
			artist = whitespaceRegex.sub(" ", artist)
			artist = html.unescape(artist)
			artist = urllib.parse.unquote(artist)
		if credit != None:
			credit = tagRegex.sub(" ", credit)
			credit = whitespaceRegex.sub(" ", credit)
			credit = html.unescape(credit)
			credit = urllib.parse.unquote(credit)
		# Add to db
		dbCur2.execute("INSERT INTO imgs VALUES (?, ?, ?, ?, ?, ?)",
			(title, license, artist, credit, restrictions, url))

print("Closing database")
dbCon.commit()
dbCon.close()
diff --git a/backend/tolData/enwiki/downloadImgs.py b/backend/tolData/enwiki/downloadImgs.py
new file mode 100755
index 0000000..8fb605f
--- /dev/null
+++ b/backend/tolData/enwiki/downloadImgs.py
@@ -0,0 +1,91 @@
#!/usr/bin/python3

import sys, re, os
import sqlite3
import urllib.parse, requests
import time, signal

usageInfo = f"""
Usage: {sys.argv[0]}

Downloads images from URLs in an image database, into an output directory,
with names of the form 'pageId1.ext1'.

SIGINT causes the program to finish an ongoing download and exit.
The program can be re-run to continue downloading, and looks
in the output directory to decide what to skip.
+""" +if len(sys.argv) > 1: + print(usageInfo, file=sys.stderr) + sys.exit(1) + +imgDb = "imgData.db" # About 130k image names +outDir = "imgs" +licenseRegex = re.compile(r"cc0|cc([ -]by)?([ -]sa)?([ -][1234]\.[05])?( \w\w\w?)?", flags=re.IGNORECASE) +# In testing, this downloaded about 100k images, over several days + +if not os.path.exists(outDir): + os.mkdir(outDir) +print("Checking for already-downloaded images") +fileList = os.listdir(outDir) +pageIdsDone = set() +for filename in fileList: + (basename, extension) = os.path.splitext(filename) + pageIdsDone.add(int(basename)) +print(f"Found {len(pageIdsDone)}") + +# Set SIGINT handler +interrupted = False +oldHandler = None +def onSigint(sig, frame): + global interrupted + interrupted = True + signal.signal(signal.SIGINT, oldHandler) +oldHandler = signal.signal(signal.SIGINT, onSigint) + +print("Opening database") +dbCon = sqlite3.connect(imgDb) +dbCur = dbCon.cursor() +print("Starting downloads") +iterNum = 0 +query = "SELECT page_id, license, artist, credit, restrictions, url FROM" \ + " imgs INNER JOIN page_imgs ON imgs.name = page_imgs.img_name" +for (pageId, license, artist, credit, restrictions, url) in dbCur.execute(query): + if pageId in pageIdsDone: + continue + if interrupted: + print(f"Exiting loop") + break + # Check for problematic attributes + if license == None or licenseRegex.fullmatch(license) == None: + continue + if artist == None or artist == "" or len(artist) > 100 or re.match(r"(\d\. )?File:", artist) != None: + continue + if credit == None or len(credit) > 300 or re.match(r"File:", credit) != None: + continue + if restrictions != None and restrictions != "": + continue + # Download image + iterNum += 1 + print(f"Iteration {iterNum}: Downloading for page-id {pageId}") + urlParts = urllib.parse.urlparse(url) + extension = os.path.splitext(urlParts.path)[1] + if len(extension) <= 1: + print(f"WARNING: No filename extension found in URL {url}") + sys.exit(1) + outFile = f"{outDir}/{pageId}{extension}" + headers = { + "user-agent": "terryt.dev (terry06890@gmail.com)", + "accept-encoding": "gzip", + } + try: + response = requests.get(url, headers=headers) + with open(outFile, 'wb') as file: + file.write(response.content) + time.sleep(1) + # https://en.wikipedia.org/wiki/Wikipedia:Database_download says to "throttle self to 1 cache miss per sec" + # It's unclear how to properly check for cache misses, so this just aims for 1 per sec + except Exception as e: + print(f"Error while downloading to {outFile}: {e}") +print("Closing database") +dbCon.close() diff --git a/backend/tolData/enwiki/genDescData.py b/backend/tolData/enwiki/genDescData.py new file mode 100755 index 0000000..b0ca272 --- /dev/null +++ b/backend/tolData/enwiki/genDescData.py @@ -0,0 +1,127 @@ +#!/usr/bin/python3 + +import sys, os, re +import bz2 +import html, mwxml, mwparserfromhell +import sqlite3 + +usageInfo = f""" +Usage: {sys.argv[0]} + +Reads through the wiki dump, and attempts to +parse short-descriptions, and add them to a database. 
+""" +if len(sys.argv) > 1: + print(usageInfo, file=sys.stderr) + sys.exit(1) + +dumpFile = "enwiki-20220501-pages-articles-multistream.xml.bz2" # Had about 22e6 pages +enwikiDb = "descData.db" +# In testing, this script took over 10 hours to run, and generated about 5GB + +descLineRegex = re.compile("^ *[A-Z'\"]") +embeddedHtmlRegex = re.compile(r"<[^<]+/>||<[^([^<]*|[^<]*<[^<]+>[^<]*)|<[^<]+$") + # Recognises a self-closing HTML tag, a tag with 0 children, tag with 1 child with 0 children, or unclosed tag +convertTemplateRegex = re.compile(r"{{convert\|(\d[^|]*)\|(?:(to|-)\|(\d[^|]*)\|)?([a-z][^|}]*)[^}]*}}") +def convertTemplateReplace(match): + if match.group(2) == None: + return f"{match.group(1)} {match.group(4)}" + else: + return f"{match.group(1)} {match.group(2)} {match.group(3)} {match.group(4)}" +parensGroupRegex = re.compile(r" \([^()]*\)") +leftoverBraceRegex = re.compile(r"(?:{\||{{).*") + +def parseDesc(text): + # Find first matching line outside {{...}}, [[...]], and block-html-comment constructs, + # and then accumulate lines until a blank one. + # Some cases not accounted for include: disambiguation pages, abstracts with sentences split-across-lines, + # nested embedded html, 'content significant' embedded-html, markup not removable with mwparsefromhell, + lines = [] + openBraceCount = 0 + openBracketCount = 0 + inComment = False + skip = False + for line in text.splitlines(): + line = line.strip() + if len(lines) == 0: + if len(line) > 0: + if openBraceCount > 0 or line[0] == "{": + openBraceCount += line.count("{") + openBraceCount -= line.count("}") + skip = True + if openBracketCount > 0 or line[0] == "[": + openBracketCount += line.count("[") + openBracketCount -= line.count("]") + skip = True + if inComment or line.find("") != -1: + if inComment: + inComment = False + skip = True + else: + inComment = True + skip = True + if skip: + skip = False + continue + if line[-1] == ":": # Seems to help avoid disambiguation pages + return None + if descLineRegex.match(line) != None: + lines.append(line) + else: + if len(line) == 0: + return removeMarkup(" ".join(lines)) + lines.append(line) + if len(lines) > 0: + return removeMarkup(" ".join(lines)) + return None +def removeMarkup(content): + content = embeddedHtmlRegex.sub("", content) + content = convertTemplateRegex.sub(convertTemplateReplace, content) + content = mwparserfromhell.parse(content).strip_code() # Remove wikitext markup + content = parensGroupRegex.sub("", content) + content = leftoverBraceRegex.sub("", content) + return content +def convertTitle(title): + return html.unescape(title).replace("_", " ") + +print("Creating database") +if os.path.exists(enwikiDb): + raise Exception(f"ERROR: Existing {enwikiDb}") +dbCon = sqlite3.connect(enwikiDb) +dbCur = dbCon.cursor() +dbCur.execute("CREATE TABLE pages (id INT PRIMARY KEY, title TEXT UNIQUE)") +dbCur.execute("CREATE INDEX pages_title_idx ON pages(title COLLATE NOCASE)") +dbCur.execute("CREATE TABLE redirects (id INT PRIMARY KEY, target TEXT)") +dbCur.execute("CREATE INDEX redirects_idx ON redirects(target)") +dbCur.execute("CREATE TABLE descs (id INT PRIMARY KEY, desc TEXT)") + +print("Iterating through dump file") +with bz2.open(dumpFile, mode='rt') as file: + dump = mwxml.Dump.from_file(file) + pageNum = 0 + for page in dump: + pageNum += 1 + if pageNum % 1e4 == 0: + print(f"At page {pageNum}") + if pageNum > 3e4: + break + # Parse page + if page.namespace == 0: + try: + dbCur.execute("INSERT INTO pages VALUES (?, ?)", (page.id, convertTitle(page.title))) + 
except sqlite3.IntegrityError as e: + # Accounts for certain pages that have the same title + print(f"Failed to add page with title \"{page.title}\": {e}", file=sys.stderr) + continue + if page.redirect != None: + dbCur.execute("INSERT INTO redirects VALUES (?, ?)", (page.id, convertTitle(page.redirect))) + else: + revision = next(page) + desc = parseDesc(revision.text) + if desc != None: + dbCur.execute("INSERT INTO descs VALUES (?, ?)", (page.id, desc)) + +print("Closing database") +dbCon.commit() +dbCon.close() diff --git a/backend/tolData/enwiki/genDumpIndexDb.py b/backend/tolData/enwiki/genDumpIndexDb.py new file mode 100755 index 0000000..3955885 --- /dev/null +++ b/backend/tolData/enwiki/genDumpIndexDb.py @@ -0,0 +1,58 @@ +#!/usr/bin/python3 + +import sys, os, re +import bz2 +import sqlite3 + +usageInfo = f""" +Usage: {sys.argv[0]} + +Adds data from the wiki dump index-file into a database. +""" +if len(sys.argv) > 1: + print(usageInfo, file=sys.stderr) + sys.exit(1) + +indexFile = "enwiki-20220501-pages-articles-multistream-index.txt.bz2" # Had about 22e6 lines +indexDb = "dumpIndex.db" + +if os.path.exists(indexDb): + raise Exception(f"ERROR: Existing {indexDb}") +print("Creating database") +dbCon = sqlite3.connect(indexDb) +dbCur = dbCon.cursor() +dbCur.execute("CREATE TABLE offsets (title TEXT PRIMARY KEY, id INT UNIQUE, offset INT, next_offset INT)") + +print("Iterating through index file") +lineRegex = re.compile(r"([^:]+):([^:]+):(.*)") +lastOffset = 0 +lineNum = 0 +entriesToAdd = [] +with bz2.open(indexFile, mode='rt') as file: + for line in file: + lineNum += 1 + if lineNum % 1e5 == 0: + print(f"At line {lineNum}") + # + match = lineRegex.fullmatch(line.rstrip()) + (offset, pageId, title) = match.group(1,2,3) + offset = int(offset) + if offset > lastOffset: + for (t, p) in entriesToAdd: + try: + dbCur.execute("INSERT INTO offsets VALUES (?, ?, ?, ?)", (t, p, lastOffset, offset)) + except sqlite3.IntegrityError as e: + # Accounts for certain entries in the file that have the same title + print(f"Failed on title \"{t}\": {e}", file=sys.stderr) + entriesToAdd = [] + lastOffset = offset + entriesToAdd.append([title, pageId]) +for (title, pageId) in entriesToAdd: + try: + dbCur.execute("INSERT INTO offsets VALUES (?, ?, ?, ?)", (title, pageId, lastOffset, -1)) + except sqlite3.IntegrityError as e: + print(f"Failed on title \"{t}\": {e}", file=sys.stderr) + +print("Closing database") +dbCon.commit() +dbCon.close() diff --git a/backend/tolData/enwiki/genImgData.py b/backend/tolData/enwiki/genImgData.py new file mode 100755 index 0000000..dedfe14 --- /dev/null +++ b/backend/tolData/enwiki/genImgData.py @@ -0,0 +1,190 @@ +#!/usr/bin/python3 + +import sys, re +import bz2, html, urllib.parse +import sqlite3 + +usageInfo = f""" +Usage: {sys.argv[0]} + +For some set of page IDs, looks up their content in the wiki dump, +and tries to parse infobox image names, storing them into a database. + +The program can be re-run with an updated set of page IDs, and +will skip already-processed page IDs. 
+""" +if len(sys.argv) > 1: + print(usageInfo, file=sys.stderr) + sys.exit(1) + +def getInputPageIds(): + pageIds = set() + dbCon = sqlite3.connect("../data.db") + dbCur = dbCon.cursor() + for (pageId,) in dbCur.execute("SELECT id from wiki_ids"): + pageIds.add(pageId) + dbCon.close() + return pageIds +dumpFile = "enwiki-20220501-pages-articles-multistream.xml.bz2" +indexDb = "dumpIndex.db" +imgDb = "imgData.db" # The database to create +idLineRegex = re.compile(r"(.*)") +imageLineRegex = re.compile(r".*\| *image *= *([^|]*)") +bracketImageRegex = re.compile(r"\[\[(File:[^|]*).*]]") +imageNameRegex = re.compile(r".*\.(jpg|jpeg|png|gif|tiff|tif)", flags=re.IGNORECASE) +cssImgCropRegex = re.compile(r"{{css image crop\|image *= *(.*)", flags=re.IGNORECASE) +# In testing, got about 360k image names + +print("Getting input page-ids") +pageIds = getInputPageIds() +print(f"Found {len(pageIds)}") + +print("Opening databases") +indexDbCon = sqlite3.connect(indexDb) +indexDbCur = indexDbCon.cursor() +imgDbCon = sqlite3.connect(imgDb) +imgDbCur = imgDbCon.cursor() +print("Checking tables") +if imgDbCur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='page_imgs'").fetchone() == None: + # Create tables if not present + imgDbCur.execute("CREATE TABLE page_imgs (page_id INT PRIMARY KEY, img_name TEXT)") # img_name may be NULL + imgDbCur.execute("CREATE INDEX page_imgs_idx ON page_imgs(img_name)") +else: + # Check for already-processed page IDs + numSkipped = 0 + for (pid,) in imgDbCur.execute("SELECT page_id FROM page_imgs"): + if pid in pageIds: + pageIds.remove(pid) + numSkipped += 1 + else: + print(f"WARNING: Found already-processed page ID {pid} which was not in input set") + print(f"Will skip {numSkipped} already-processed page IDs") + +print("Getting dump-file offsets") +offsetToPageids = {} +offsetToEnd = {} # Maps chunk-start offsets to their chunk-end offsets +iterNum = 0 +for pageId in pageIds: + iterNum += 1 + if iterNum % 1e4 == 0: + print(f"At iteration {iterNum}") + # + query = "SELECT offset, next_offset FROM offsets WHERE id = ?" + row = indexDbCur.execute(query, (pageId,)).fetchone() + if row == None: + print(f"WARNING: Page ID {pageId} not found") + continue + (chunkOffset, endOffset) = row + offsetToEnd[chunkOffset] = endOffset + if chunkOffset not in offsetToPageids: + offsetToPageids[chunkOffset] = [] + offsetToPageids[chunkOffset].append(pageId) +print(f"Found {len(offsetToEnd)} chunks to check") + +print("Iterating through chunks in dump file") +def getImageName(content): + " Given an array of text-content lines, tries to return an infoxbox image name, or None " + # Doesn't try and find images in outside-infobox [[File:...]] and sections + for line in content: + match = imageLineRegex.match(line) + if match != None: + imageName = match.group(1).strip() + if imageName == "": + return None + imageName = html.unescape(imageName) + # Account for {{... + if imageName.startswith("{"): + match = cssImgCropRegex.match(imageName) + if match == None: + return None + imageName = match.group(1) + # Account for [[File:...|...]] + if imageName.startswith("["): + match = bracketImageRegex.match(imageName) + if match == None: + return None + imageName = match.group(1) + # Account for