diff options
| author | Terry Truong <terry06890@gmail.com> | 2022-07-11 01:54:08 +1000 |
|---|---|---|
| committer | Terry Truong <terry06890@gmail.com> | 2022-07-11 01:54:08 +1000 |
| commit | 5fe71ea7b9d9a5d2dc6e8e5ce5b9193629eed74d (patch) | |
| tree | 3b8b9d7299540a812ec93e224f8fc71249a98860 /backend/tolData/enwiki/lookupPage.py | |
| parent | a8f80a02b88055cfcb45664ce3a3d24c2b2da98c (diff) | |
Make backend dev server script serve the image files
Previously, image files in backend/data/img were moved to, or
symlinked from, public/. This needed to be changed before each
build, otherwise vite would end up copying gigabytes of images.
Diffstat (limited to 'backend/tolData/enwiki/lookupPage.py')
| -rwxr-xr-x | backend/tolData/enwiki/lookupPage.py | 68 |
1 file changed, 68 insertions, 0 deletions
#!/usr/bin/python3

"""
Look up a page by title in a multistream Wikipedia dump.

Uses the dump-index db to find the bz2 chunk that contains the page,
decompresses that chunk, and prints the corresponding <page> element.
"""

# FIX: dropped unused 'import re'
import sys
import bz2
import sqlite3

usageInfo = f"""
Usage: {sys.argv[0]} title1

Looks up a page with title title1 in the wiki dump, using
the dump-index db, and prints the corresponding <page>.
"""
if len(sys.argv) != 2:
    print(usageInfo, file=sys.stderr)
    sys.exit(1)

dumpFile = "enwiki-20220501-pages-articles-multistream.xml.bz2"
indexDb = "dumpIndex.db"
# Dump titles use spaces; accept underscore-form titles as seen in wiki URLs.
pageTitle = sys.argv[1].replace("_", " ")

print("Looking up offset in index db")
dbCon = sqlite3.connect(indexDb)
dbCur = dbCon.cursor()
query = "SELECT title, offset, next_offset FROM offsets WHERE title = ?"
row = dbCur.execute(query, (pageTitle,)).fetchone()
if row is None:  # FIX: was 'row == None'; 'is None' is the correct identity test
    print("Title not found")
    sys.exit(0)
_, pageOffset, endOffset = row
dbCon.close()
print(f"Found chunk at offset {pageOffset}")

print("Reading from wiki dump")
content = []
with open(dumpFile, mode='rb') as file:
    # Get the uncompressed chunk. Each multistream chunk is an independent
    # bz2 stream, so a fresh decompressor started at pageOffset yields just
    # that chunk; next_offset == -1 marks the last chunk (read to EOF).
    file.seek(pageOffset)
    compressedData = file.read(None if endOffset == -1 else endOffset - pageOffset)
    data = bz2.BZ2Decompressor().decompress(compressedData).decode()
    # Look in the chunk for the page (a multistream chunk holds up to 100 pages)
    lines = data.splitlines()
    numLines = len(lines)
    lineIdx = 0
    found = False
    pageNum = 0
    # FIX: bound the scan by numLines so a chunk that lacks the title (or a
    # truncated/malformed chunk) terminates cleanly instead of raising IndexError
    while not found and lineIdx < numLines:
        line = lines[lineIdx]
        if line.lstrip() == "<page>":
            pageNum += 1
            if pageNum > 100:
                print("ERROR: Did not find title after 100 pages")
                break
            # In dump XML the <title> element immediately follows <page>
            lineIdx += 1
            if lineIdx >= numLines:  # FIX: guard against <page> as the last line
                break
            titleLine = lines[lineIdx]
            if titleLine.lstrip() == '<title>' + pageTitle + '</title>':
                found = True
                print(f"Found title in chunk as page {pageNum}")
                content.append(line)
                content.append(titleLine)
                # Copy lines up to and including the closing </page>
                # FIX: bounded (was 'while True') to avoid IndexError on a
                # chunk truncated before </page>
                while lineIdx + 1 < numLines:
                    lineIdx += 1
                    line = lines[lineIdx]
                    content.append(line)
                    if line.lstrip() == "</page>":
                        break
        lineIdx += 1

print("Content: ")
print("\n".join(content))
