aboutsummaryrefslogtreecommitdiff
path: root/backend/tolData/enwiki/lookupPage.py
blob: 427aa7a02eae69a36508f969872f82ccda76dd1b (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
#!/usr/bin/python3

import sys
import bz2
import sqlite3

import argparse

# Command-line interface. The dump and index-db paths were hard-coded;
# they are now overridable flags (same defaults) so the script is not
# tied to one dump snapshot.
parser = argparse.ArgumentParser(description="""
Looks up a page with title title1 in the wiki dump, using the dump-index
db, and prints the corresponding <page>.
""", formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("title", help="The title to look up")
parser.add_argument("--dump", default='enwiki-20220501-pages-articles-multistream.xml.bz2',
	help="Path to the multistream wiki dump (default: %(default)s)")
parser.add_argument("--index-db", default='dumpIndex.db',
	help="Path to the dump-index sqlite db (default: %(default)s)")
args = parser.parse_args()

dumpFile = args.dump
indexDb = args.index_db
# Titles are often given in URL form with underscores; the index stores spaces.
pageTitle = args.title.replace('_', ' ')

print('Looking up offset in index db')
# Look up the bz2-stream offsets for this title in the index db.
# try/finally guarantees the connection is closed on every path — the
# original leaked it on the 'Title not found' early exit.
dbCon = sqlite3.connect(indexDb)
try:
	dbCur = dbCon.cursor()
	query = 'SELECT title, offset, next_offset FROM offsets WHERE title = ?'
	row = dbCur.execute(query, (pageTitle,)).fetchone()
finally:
	dbCon.close()
if row is None:
	print('Title not found')
	sys.exit(0)
# offset: byte position of the bz2 chunk holding the page;
# next_offset: start of the following chunk (-1 presumably marks the
# last chunk — confirmed only by the read-to-EOF handling below).
_, pageOffset, endOffset = row
print(f'Found chunk at offset {pageOffset}')

print('Reading from wiki dump')
# Lines of the matching <page> element (empty if the page is not found).
content: list[str] = []
with open(dumpFile, mode='rb') as file:
	# Read the compressed chunk containing the page. endOffset == -1
	# means there is no next chunk, so read to end-of-file; the
	# decompressor stops at the end of the first bz2 stream regardless.
	file.seek(pageOffset)
	compressedData = file.read(None if endOffset == -1 else endOffset - pageOffset)
	data = bz2.BZ2Decompressor().decompress(compressedData).decode()
	# Scan the chunk line-by-line for a <page> whose <title> matches.
	lines = data.splitlines()
	lineIdx = 0
	found = False
	pageNum = 0
	# Bound the scan by len(lines): the original loop indexed past the
	# end of the chunk (IndexError) whenever the title was absent and
	# the chunk held 100 pages or fewer.
	while not found and lineIdx < len(lines):
		line = lines[lineIdx]
		if line.lstrip() == '<page>':
			pageNum += 1
			if pageNum > 100:
				# Multistream dumps pack ~100 pages per chunk; give up past that.
				print('ERROR: Did not find title after 100 pages')
				break
			lineIdx += 1
			titleLine = lines[lineIdx]
			if titleLine.lstrip() == '<title>' + pageTitle + '</title>':
				found = True
				print(f'Found title in chunk as page {pageNum}')
				content.append(line)
				content.append(titleLine)
				# Copy every line up to and including the closing </page>.
				while True:
					lineIdx += 1
					line = lines[lineIdx]
					content.append(line)
					if line.lstrip() == '</page>':
						break
		lineIdx += 1

print('Content: ')
print('\n'.join(content))