aboutsummaryrefslogtreecommitdiff
path: root/backend/tol_data/enwiki/gen_desc_data.py
diff options
context:
space:
mode:
authorTerry Truong <terry06890@gmail.com>2022-09-11 14:55:42 +1000
committerTerry Truong <terry06890@gmail.com>2022-09-11 15:04:14 +1000
commit5de5fb93e50fe9006221b30ac4a66f1be0db82e7 (patch)
tree2567c25c902dbb40d44419805cebb38171df47fa /backend/tol_data/enwiki/gen_desc_data.py
parentdaccbbd9c73a5292ea9d6746560d7009e5aa666d (diff)
Add backend unit tests
- Add unit testing code in backend/tests/ - Change to snake-case for script/file/directory names - Use os.path.join() instead of '/' - Refactor script code into function defs and a main-guard - Make global vars all-caps Some fixes: - For getting descriptions, some wiki redirects weren't properly resolved - Linked images were sub-optimally propagated - Generation of reduced trees assumed a wiki-id association implied a description - Tilo.py had potential null dereferences by not always using a reduced node set - EOL image downloading didn't properly wait for all threads to end when finishing
Diffstat (limited to 'backend/tol_data/enwiki/gen_desc_data.py')
-rwxr-xr-xbackend/tol_data/enwiki/gen_desc_data.py126
1 files changed, 126 insertions, 0 deletions
diff --git a/backend/tol_data/enwiki/gen_desc_data.py b/backend/tol_data/enwiki/gen_desc_data.py
new file mode 100755
index 0000000..0dca16b
--- /dev/null
+++ b/backend/tol_data/enwiki/gen_desc_data.py
@@ -0,0 +1,126 @@
+#!/usr/bin/python3
+
+"""
+Reads through the wiki dump, and attempts to parse short-descriptions,
+and add them to a database
+"""
+
+# In testing, this script took over 10 hours to run, and generated about 5GB
+
+import sys, os, re
+import bz2
+import html, mwxml, mwparserfromhell
+import sqlite3
+
DUMP_FILE = 'enwiki-20220501-pages-articles-multistream.xml.bz2' # Had about 22e6 pages
DB_FILE = 'desc_data.db' # Output sqlite database (must not already exist)

# Matches a line that plausibly begins an article abstract (optional leading
# spaces, then a capital letter or quote character)
DESC_LINE_REGEX = re.compile('^ *[A-Z\'"]')
EMBEDDED_HTML_REGEX = re.compile(r'<[^<]+/>|<!--[^<]+-->|<[^</]+>([^<]*|[^<]*<[^<]+>[^<]*)</[^<]+>|<[^<]+$')
	# Recognises a self-closing HTML tag, a tag with 0 children, tag with 1 child with 0 children, or unclosed tag
# Matches wikitext convert templates of the forms {{convert|N|unit|...}} and
# {{convert|N|to|M|unit|...}} (or with '-' in place of 'to'); groups capture
# the quantity/quantities, the separator word, and the unit
CONVERT_TEMPLATE_REGEX = re.compile(r'{{convert\|(\d[^|]*)\|(?:(to|-)\|(\d[^|]*)\|)?([a-z][^|}]*)[^}]*}}')
def convertTemplateReplace(match):
	""" Used in regex-substitution with CONVERT_TEMPLATE_REGEX.
		Rewrites a convert-template match as plain text, e.g.
		'{{convert|5|to|10|km|...}}' becomes '5 to 10 km'. """
	quantity, separator, quantity2, unit = match.group(1, 2, 3, 4)
	if separator is None:
		# Single-quantity form, e.g. '{{convert|5|km}}'
		return f'{quantity} {unit}'
	return f'{quantity} {separator} {quantity2} {unit}'
# Matches a space followed by a parenthesized group, e.g. ' (an aside)', for removal
PARENS_GROUP_REGEX = re.compile(r' \([^()]*\)')
# Matches from a leftover table/template opener ('{|' or '{{') to end-of-string, for removal
LEFTOVER_BRACE_REGEX = re.compile(r'(?:{\||{{).*')
+
def genData(dumpFile: str, dbFile: str) -> None:
	""" Iterates over pages in the compressed wiki dump 'dumpFile', and records
		page titles, redirects, and parsed short-descriptions in a new sqlite
		database 'dbFile'. Raises an Exception if 'dbFile' already exists. """
	print('Creating database')
	if os.path.exists(dbFile):
		raise Exception(f'ERROR: Existing {dbFile}')
	dbCon = sqlite3.connect(dbFile)
	dbCur = dbCon.cursor()
	dbCur.execute('CREATE TABLE pages (id INT PRIMARY KEY, title TEXT UNIQUE)')
	dbCur.execute('CREATE INDEX pages_title_idx ON pages(title COLLATE NOCASE)')
	dbCur.execute('CREATE TABLE redirects (id INT PRIMARY KEY, target TEXT)')
	dbCur.execute('CREATE INDEX redirects_idx ON redirects(target)')
	dbCur.execute('CREATE TABLE descs (id INT PRIMARY KEY, desc TEXT)')
	#
	print('Iterating through dump file')
	with bz2.open(dumpFile, mode='rt') as file:
		for pageNum, page in enumerate(mwxml.Dump.from_file(file), 1):
			if pageNum % 10_000 == 0: # Integer literal (was 1e4) avoids a float modulo
				print(f'At page {pageNum}')
			# Parse page (namespace 0 holds regular article pages)
			if page.namespace == 0:
				try:
					dbCur.execute('INSERT INTO pages VALUES (?, ?)', (page.id, convertTitle(page.title)))
				except sqlite3.IntegrityError as e:
					# Accounts for certain pages that have the same title
					print(f'Failed to add page with title "{page.title}": {e}', file=sys.stderr)
					continue
				if page.redirect is not None:
					dbCur.execute('INSERT INTO redirects VALUES (?, ?)', (page.id, convertTitle(page.redirect)))
				else:
					revision = next(page) # Only the first (most recent) revision is examined
					# NOTE(review): revision text can be None (e.g. suppressed revisions) —
					# guard avoids a crash in parseDesc, which expects a str
					if revision.text is not None:
						desc = parseDesc(revision.text)
						if desc is not None:
							dbCur.execute('INSERT INTO descs VALUES (?, ?)', (page.id, desc))
	#
	print('Closing database')
	dbCon.commit()
	dbCon.close()
def parseDesc(text: str) -> str | None:
	""" Attempts to extract a short-description from a page's wikitext 'text'.
		Returns the cleaned-up description, or None if one wasn't found. """
	# Find first matching line outside {{...}}, [[...]], and block-html-comment constructs,
	# and then accumulate lines until a blank one.
	# Some cases not accounted for include: disambiguation pages, abstracts with sentences split-across-lines,
	# nested embedded html, 'content significant' embedded-html, markup not removable with mwparserfromhell,
	lines: list[str] = [] # Description lines accumulated so far
	openBraceCount = 0 # Count of unclosed '{' chars in a leading {{...}}/{|...|} construct being skipped
	openBracketCount = 0 # Count of unclosed '[' chars in a leading [[...]] construct being skipped
	inComment = False # True while inside a multi-line <!-- ... --> comment
	skip = False # Set when the current line belongs to a construct to be skipped
	for line in text.splitlines():
		line = line.strip()
		if not lines: # Still looking for the first description line
			if line:
				if openBraceCount > 0 or line[0] == '{':
					openBraceCount += line.count('{')
					openBraceCount -= line.count('}')
					skip = True
				if openBracketCount > 0 or line[0] == '[':
					openBracketCount += line.count('[')
					openBracketCount -= line.count(']')
					skip = True
				if inComment or line.find('<!--') != -1:
					if line.find('-->') != -1:
						# Comment closes on this line; skip it only if the comment
						# began on an earlier line (an inline comment within a
						# candidate line doesn't disqualify the line itself)
						if inComment:
							inComment = False
							skip = True
					else:
						inComment = True
						skip = True
				if skip:
					skip = False
					continue
				if line[-1] == ':': # Seems to help avoid disambiguation pages
					return None
				if DESC_LINE_REGEX.match(line) is not None:
					lines.append(line)
		else: # Accumulating description lines
			if not line:
				# Blank line ends the description
				return removeMarkup(' '.join(lines))
			lines.append(line)
	if lines: # Text ended while still accumulating
		return removeMarkup(' '.join(lines))
	return None
def removeMarkup(content: str) -> str:
	""" Cleans up a raw description string: strips embedded html, rewrites
		convert-templates as plain text, removes wikitext markup,
		parenthesized groups, and leftover table/template braces. """
	withoutHtml = EMBEDDED_HTML_REGEX.sub('', content)
	withConverts = CONVERT_TEMPLATE_REGEX.sub(convertTemplateReplace, withoutHtml)
	plainText = mwparserfromhell.parse(withConverts).strip_code() # Remove wikitext markup
	withoutParens = PARENS_GROUP_REGEX.sub('', plainText)
	return LEFTOVER_BRACE_REGEX.sub('', withoutParens)
def convertTitle(title: str) -> str:
	""" Normalises a page title: decodes html entities, and replaces underscores with spaces """
	unescaped = html.unescape(title)
	return unescaped.replace('_', ' ')
+
if __name__ == '__main__':
	# Args are parsed only so that --help prints the module docstring
	import argparse
	argParser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
	argParser.parse_args()

	genData(DUMP_FILE, DB_FILE)