Diffstat (limited to 'backend/tolData/enwiki')
-rwxr-xr-x | backend/tolData/enwiki/downloadImgLicenseInfo.py | 136
-rwxr-xr-x | backend/tolData/enwiki/downloadImgs.py           |  50
-rwxr-xr-x | backend/tolData/enwiki/genDescData.py            | 100
-rwxr-xr-x | backend/tolData/enwiki/genDumpIndexDb.py         |  39
-rwxr-xr-x | backend/tolData/enwiki/genImgData.py             | 118
-rwxr-xr-x | backend/tolData/enwiki/genPageviewData.py        |  10
-rwxr-xr-x | backend/tolData/enwiki/lookupPage.py             |  34
7 files changed, 244 insertions, 243 deletions
diff --git a/backend/tolData/enwiki/downloadImgLicenseInfo.py b/backend/tolData/enwiki/downloadImgLicenseInfo.py
index dd39d54..ba6317e 100755
--- a/backend/tolData/enwiki/downloadImgLicenseInfo.py
+++ b/backend/tolData/enwiki/downloadImgLicenseInfo.py
@@ -1,6 +1,6 @@
 #!/usr/bin/python3

-import sys, re
+import re
 import sqlite3, urllib.parse, html
 import requests
 import time, signal
@@ -16,33 +16,33 @@ at already-processed names to decide what to skip.
 """, formatter_class=argparse.RawDescriptionHelpFormatter)
 parser.parse_args()

-imgDb = "imgData.db"
-apiUrl = "https://en.wikipedia.org/w/api.php"
-userAgent = "terryt.dev (terry06890@gmail.com)"
+imgDb = 'imgData.db'
+apiUrl = 'https://en.wikipedia.org/w/api.php'
+userAgent = 'terryt.dev (terry06890@gmail.com)'
 batchSz = 50 # Max 50
-tagRegex = re.compile(r"<[^<]+>")
-whitespaceRegex = re.compile(r"\s+")
+tagRegex = re.compile(r'<[^<]+>')
+whitespaceRegex = re.compile(r'\s+')

-print("Opening database")
+print('Opening database')
 dbCon = sqlite3.connect(imgDb)
 dbCur = dbCon.cursor()
 dbCur2 = dbCon.cursor()
-print("Checking for table")
-if dbCur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='imgs'").fetchone() == None:
-	dbCur.execute("CREATE TABLE imgs(" \
-		"name TEXT PRIMARY KEY, license TEXT, artist TEXT, credit TEXT, restrictions TEXT, url TEXT)")
+print('Checking for table')
+if dbCur.execute('SELECT name FROM sqlite_master WHERE type="table" AND name="imgs"').fetchone() is None:
+	dbCur.execute('CREATE TABLE imgs(' \
+		'name TEXT PRIMARY KEY, license TEXT, artist TEXT, credit TEXT, restrictions TEXT, url TEXT)')

-print("Reading image names")
-imgNames = set()
-for (imgName,) in dbCur.execute("SELECT DISTINCT img_name FROM page_imgs WHERE img_name NOT NULL"):
+print('Reading image names')
+imgNames: set[str] = set()
+for (imgName,) in dbCur.execute('SELECT DISTINCT img_name FROM page_imgs WHERE img_name NOT NULL'):
 	imgNames.add(imgName)
-print(f"Found {len(imgNames)}")
+print(f'Found {len(imgNames)}')

-print("Checking for already-processed images")
+print('Checking for already-processed images')
 oldSz = len(imgNames)
-for (imgName,) in dbCur.execute("SELECT name FROM imgs"):
+for (imgName,) in dbCur.execute('SELECT name FROM imgs'):
 	imgNames.discard(imgName)
-print(f"Found {oldSz - len(imgNames)}")
+print(f'Found {oldSz - len(imgNames)}')

 # Set SIGINT handler
 interrupted = False
@@ -53,95 +53,95 @@ def onSigint(sig, frame):
 	signal.signal(signal.SIGINT, oldHandler)
 oldHandler = signal.signal(signal.SIGINT, onSigint)

-print("Iterating through image names")
-imgNames = list(imgNames)
+print('Iterating through image names')
+imgNameList = list(imgNames)
 iterNum = 0
-for i in range(0, len(imgNames), batchSz):
+for i in range(0, len(imgNameList), batchSz):
 	iterNum += 1
 	if iterNum % 1 == 0:
-		print(f"At iteration {iterNum} (after {(iterNum - 1) * batchSz} images)")
+		print(f'At iteration {iterNum} (after {(iterNum - 1) * batchSz} images)')
 	if interrupted:
-		print(f"Exiting loop at iteration {iterNum}")
+		print(f'Exiting loop at iteration {iterNum}')
 		break
 	# Get batch
-	imgBatch = imgNames[i:i+batchSz]
-	imgBatch = ["File:" + x for x in imgBatch]
+	imgBatch = imgNameList[i:i+batchSz]
+	imgBatch = ['File:' + x for x in imgBatch]
 	# Make request
 	headers = {
-		"user-agent": userAgent,
-		"accept-encoding": "gzip",
+		'user-agent': userAgent,
+		'accept-encoding': 'gzip',
 	}
 	params = {
-		"action": "query",
-		"format": "json",
-		"prop": "imageinfo",
-		"iiprop": "extmetadata|url",
-		"maxlag": "5",
-		"titles": "|".join(imgBatch),
-		"iiextmetadatafilter": "Artist|Credit|LicenseShortName|Restrictions",
+		'action': 'query',
+		'format': 'json',
+		'prop': 'imageinfo',
+		'iiprop': 'extmetadata|url',
+		'maxlag': '5',
+		'titles': '|'.join(imgBatch),
+		'iiextmetadatafilter': 'Artist|Credit|LicenseShortName|Restrictions',
 	}
 	responseObj = None
 	try:
 		response = requests.get(apiUrl, params=params, headers=headers)
 		responseObj = response.json()
 	except Exception as e:
-		print(f"ERROR: Exception while downloading info: {e}")
-		print(f"\tImage batch: " + "|".join(imgBatch))
+		print(f'ERROR: Exception while downloading info: {e}')
+		print('\tImage batch: ' + '|'.join(imgBatch))
 		continue
 	# Parse response-object
-	if "query" not in responseObj or "pages" not in responseObj["query"]:
-		print("WARNING: Response object for doesn't have page data")
-		print("\tImage batch: " + "|".join(imgBatch))
-		if "error" in responseObj:
-			errorCode = responseObj["error"]["code"]
-			print(f"\tError code: {errorCode}")
-			if errorCode == "maxlag":
+	if 'query' not in responseObj or 'pages' not in responseObj['query']:
+		print('WARNING: Response object for doesn\'t have page data')
+		print('\tImage batch: ' + '|'.join(imgBatch))
+		if 'error' in responseObj:
+			errorCode = responseObj['error']['code']
+			print(f'\tError code: {errorCode}')
+			if errorCode == 'maxlag':
 				time.sleep(5)
 		continue
-	pages = responseObj["query"]["pages"]
-	normalisedToInput = {}
-	if "normalized" in responseObj["query"]:
-		for entry in responseObj["query"]["normalized"]:
-			normalisedToInput[entry["to"]] = entry["from"]
-	for (_, page) in pages.items():
+	pages = responseObj['query']['pages']
+	normalisedToInput: dict[str, str] = {}
+	if 'normalized' in responseObj['query']:
+		for entry in responseObj['query']['normalized']:
+			normalisedToInput[entry['to']] = entry['from']
+	for _, page in pages.items():
 		# Some fields // More info at https://www.mediawiki.org/wiki/Extension:CommonsMetadata#Returned_data
 			# LicenseShortName: short human-readable license name, apparently more reliable than 'License',
 			# Artist: author name (might contain complex html, multiple authors, etc)
 			# Credit: 'source'
 				# For image-map-like images, can be quite large/complex html, creditng each sub-image
-				# May be <a href="text1">text2</a>, where the text2 might be non-indicative
+				# May be <a href='text1'>text2</a>, where the text2 might be non-indicative
 			# Restrictions: specifies non-copyright legal restrictions
-		title = page["title"]
+		title: str = page['title']
 		if title in normalisedToInput:
 			title = normalisedToInput[title]
 		title = title[5:] # Remove 'File:'
 		if title not in imgNames:
-			print(f"WARNING: Got title \"{title}\" not in image-name list")
+			print(f'WARNING: Got title "{title}" not in image-name list')
 			continue
-		if "imageinfo" not in page:
-			print(f"WARNING: No imageinfo section for page \"{title}\"")
+		if 'imageinfo' not in page:
+			print(f'WARNING: No imageinfo section for page "{title}"')
 			continue
-		metadata = page["imageinfo"][0]["extmetadata"]
-		url = page["imageinfo"][0]["url"]
-		license = metadata['LicenseShortName']['value'] if 'LicenseShortName' in metadata else None
-		artist = metadata['Artist']['value'] if 'Artist' in metadata else None
-		credit = metadata['Credit']['value'] if 'Credit' in metadata else None
-		restrictions = metadata['Restrictions']['value'] if 'Restrictions' in metadata else None
+		metadata = page['imageinfo'][0]['extmetadata']
+		url: str = page['imageinfo'][0]['url']
+		license: str | None = metadata['LicenseShortName']['value'] if 'LicenseShortName' in metadata else None
+		artist: str | None = metadata['Artist']['value'] if 'Artist' in metadata else None
+		credit: str | None = metadata['Credit']['value'] if 'Credit' in metadata else None
+		restrictions: str | None = metadata['Restrictions']['value'] if 'Restrictions' in metadata else None
 		# Remove markup
-		if artist != None:
-			artist = tagRegex.sub(" ", artist)
-			artist = whitespaceRegex.sub(" ", artist)
+		if artist is not None:
+			artist = tagRegex.sub(' ', artist)
+			artist = whitespaceRegex.sub(' ', artist)
 			artist = html.unescape(artist)
 			artist = urllib.parse.unquote(artist)
-		if credit != None:
-			credit = tagRegex.sub(" ", credit)
-			credit = whitespaceRegex.sub(" ", credit)
+		if credit is not None:
+			credit = tagRegex.sub(' ', credit)
+			credit = whitespaceRegex.sub(' ', credit)
 			credit = html.unescape(credit)
 			credit = urllib.parse.unquote(credit)
 		# Add to db
-		dbCur2.execute("INSERT INTO imgs VALUES (?, ?, ?, ?, ?, ?)",
+		dbCur2.execute('INSERT INTO imgs VALUES (?, ?, ?, ?, ?, ?)',
 			(title, license, artist, credit, restrictions, url))

-print("Closing database")
+print('Closing database')
 dbCon.commit()
 dbCon.close()
diff --git a/backend/tolData/enwiki/downloadImgs.py b/backend/tolData/enwiki/downloadImgs.py
index 520677f..def4714 100755
--- a/backend/tolData/enwiki/downloadImgs.py
+++ b/backend/tolData/enwiki/downloadImgs.py
@@ -16,20 +16,20 @@ in the output directory do decide what to skip.
 """, formatter_class=argparse.RawDescriptionHelpFormatter)
 parser.parse_args()

-imgDb = "imgData.db" # About 130k image names
-outDir = "imgs"
-licenseRegex = re.compile(r"cc0|cc([ -]by)?([ -]sa)?([ -][1234]\.[05])?( \w\w\w?)?", flags=re.IGNORECASE)
+imgDb = 'imgData.db' # About 130k image names
+outDir = 'imgs'
+licenseRegex = re.compile(r'cc0|cc([ -]by)?([ -]sa)?([ -][1234]\.[05])?( \w\w\w?)?', flags=re.IGNORECASE)
 # In testing, this downloaded about 100k images, over several days

 if not os.path.exists(outDir):
 	os.mkdir(outDir)
-print("Checking for already-downloaded images")
+print('Checking for already-downloaded images')
 fileList = os.listdir(outDir)
-pageIdsDone = set()
+pageIdsDone: set[int] = set()
 for filename in fileList:
-	(basename, extension) = os.path.splitext(filename)
+	basename, extension = os.path.splitext(filename)
 	pageIdsDone.add(int(basename))
-print(f"Found {len(pageIdsDone)}")
+print(f'Found {len(pageIdsDone)}')

 # Set SIGINT handler
 interrupted = False
@@ -40,49 +40,49 @@ def onSigint(sig, frame):
 	signal.signal(signal.SIGINT, oldHandler)
 oldHandler = signal.signal(signal.SIGINT, onSigint)

-print("Opening database")
+print('Opening database')
 dbCon = sqlite3.connect(imgDb)
 dbCur = dbCon.cursor()

-print("Starting downloads")
+print('Starting downloads')
 iterNum = 0
-query = "SELECT page_id, license, artist, credit, restrictions, url FROM" \
-	" imgs INNER JOIN page_imgs ON imgs.name = page_imgs.img_name"
-for (pageId, license, artist, credit, restrictions, url) in dbCur.execute(query):
+query = 'SELECT page_id, license, artist, credit, restrictions, url FROM' \
+	' imgs INNER JOIN page_imgs ON imgs.name = page_imgs.img_name'
+for pageId, license, artist, credit, restrictions, url in dbCur.execute(query):
 	if pageId in pageIdsDone:
 		continue
 	if interrupted:
-		print(f"Exiting loop")
+		print('Exiting loop')
 		break
 	# Check for problematic attributes
-	if license == None or licenseRegex.fullmatch(license) == None:
+	if license is None or licenseRegex.fullmatch(license) is None:
 		continue
-	if artist == None or artist == "" or len(artist) > 100 or re.match(r"(\d\. )?File:", artist) != None:
+	if artist is None or artist == '' or len(artist) > 100 or re.match(r'(\d\. )?File:', artist) is not None:
 		continue
-	if credit == None or len(credit) > 300 or re.match(r"File:", credit) != None:
+	if credit is None or len(credit) > 300 or re.match(r'File:', credit) is not None:
 		continue
-	if restrictions != None and restrictions != "":
+	if restrictions is not None and restrictions != '':
 		continue
 	# Download image
 	iterNum += 1
-	print(f"Iteration {iterNum}: Downloading for page-id {pageId}")
+	print(f'Iteration {iterNum}: Downloading for page-id {pageId}')
 	urlParts = urllib.parse.urlparse(url)
 	extension = os.path.splitext(urlParts.path)[1]
 	if len(extension) <= 1:
-		print(f"WARNING: No filename extension found in URL {url}")
+		print(f'WARNING: No filename extension found in URL {url}')
 		sys.exit(1)
-	outFile = f"{outDir}/{pageId}{extension}"
+	outFile = f'{outDir}/{pageId}{extension}'
 	headers = {
-		"user-agent": "terryt.dev (terry06890@gmail.com)",
-		"accept-encoding": "gzip",
+		'user-agent': 'terryt.dev (terry06890@gmail.com)',
+		'accept-encoding': 'gzip',
 	}
 	try:
 		response = requests.get(url, headers=headers)
 		with open(outFile, 'wb') as file:
			file.write(response.content)
 		time.sleep(1)
-			# https://en.wikipedia.org/wiki/Wikipedia:Database_download says to "throttle self to 1 cache miss per sec"
+			# https://en.wikipedia.org/wiki/Wikipedia:Database_download says to 'throttle self to 1 cache miss per sec'
 			# It's unclear how to properly check for cache misses, so this just aims for 1 per sec
 	except Exception as e:
-		print(f"Error while downloading to {outFile}: {e}")
-print("Closing database")
+		print(f'Error while downloading to {outFile}: {e}')
+print('Closing database')
 dbCon.close()
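The download filter above stands or falls with licenseRegex, which accepts CC0 and the CC BY/BY-SA family (with optional version and region suffixes) case-insensitively. A quick ad-hoc check, not part of the repo, of what the rewritten pattern accepts versus skips:

import re

licenseRegex = re.compile(r'cc0|cc([ -]by)?([ -]sa)?([ -][1234]\.[05])?( \w\w\w?)?', flags=re.IGNORECASE)

# Only fullmatch() counts, so 'Public domain' and 'GFDL' are skipped
for name in ('CC0', 'CC BY-SA 4.0', 'cc-by-2.5', 'Public domain', 'GFDL'):
	verdict = 'download' if licenseRegex.fullmatch(name) is not None else 'skip'
	print(f'{name!r}: {verdict}')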
diff --git a/backend/tolData/enwiki/genDescData.py b/backend/tolData/enwiki/genDescData.py
index 0085d70..1698f5c 100755
--- a/backend/tolData/enwiki/genDescData.py
+++ b/backend/tolData/enwiki/genDescData.py
@@ -12,46 +12,46 @@ and add them to a database
 """, formatter_class=argparse.RawDescriptionHelpFormatter)
 parser.parse_args()

-dumpFile = "enwiki-20220501-pages-articles-multistream.xml.bz2" # Had about 22e6 pages
-enwikiDb = "descData.db"
+dumpFile = 'enwiki-20220501-pages-articles-multistream.xml.bz2' # Had about 22e6 pages
+enwikiDb = 'descData.db'
 # In testing, this script took over 10 hours to run, and generated about 5GB

-descLineRegex = re.compile("^ *[A-Z'\"]")
-embeddedHtmlRegex = re.compile(r"<[^<]+/>|<!--[^<]+-->|<[^</]+>([^<]*|[^<]*<[^<]+>[^<]*)</[^<]+>|<[^<]+$")
+descLineRegex = re.compile('^ *[A-Z\'"]')
+embeddedHtmlRegex = re.compile(r'<[^<]+/>|<!--[^<]+-->|<[^</]+>([^<]*|[^<]*<[^<]+>[^<]*)</[^<]+>|<[^<]+$')
 	# Recognises a self-closing HTML tag, a tag with 0 children, tag with 1 child with 0 children, or unclosed tag
-convertTemplateRegex = re.compile(r"{{convert\|(\d[^|]*)\|(?:(to|-)\|(\d[^|]*)\|)?([a-z][^|}]*)[^}]*}}")
+convertTemplateRegex = re.compile(r'{{convert\|(\d[^|]*)\|(?:(to|-)\|(\d[^|]*)\|)?([a-z][^|}]*)[^}]*}}')
 def convertTemplateReplace(match):
-	if match.group(2) == None:
-		return f"{match.group(1)} {match.group(4)}"
+	if match.group(2) is None:
+		return f'{match.group(1)} {match.group(4)}'
 	else:
-		return f"{match.group(1)} {match.group(2)} {match.group(3)} {match.group(4)}"
-parensGroupRegex = re.compile(r" \([^()]*\)")
-leftoverBraceRegex = re.compile(r"(?:{\||{{).*")
+		return f'{match.group(1)} {match.group(2)} {match.group(3)} {match.group(4)}'
+parensGroupRegex = re.compile(r' \([^()]*\)')
+leftoverBraceRegex = re.compile(r'(?:{\||{{).*')

-def parseDesc(text):
+def parseDesc(text: str) -> str | None:
 	# Find first matching line outside {{...}}, [[...]], and block-html-comment constructs,
 	# and then accumulate lines until a blank one.
 	# Some cases not accounted for include: disambiguation pages, abstracts with sentences split-across-lines,
 	# nested embedded html, 'content significant' embedded-html, markup not removable with mwparsefromhell,
-	lines = []
+	lines: list[str] = []
 	openBraceCount = 0
 	openBracketCount = 0
 	inComment = False
 	skip = False
 	for line in text.splitlines():
 		line = line.strip()
-		if len(lines) == 0:
-			if len(line) > 0:
-				if openBraceCount > 0 or line[0] == "{":
-					openBraceCount += line.count("{")
-					openBraceCount -= line.count("}")
+		if not lines:
+			if line:
+				if openBraceCount > 0 or line[0] == '{':
+					openBraceCount += line.count('{')
+					openBraceCount -= line.count('}')
 					skip = True
-				if openBracketCount > 0 or line[0] == "[":
-					openBracketCount += line.count("[")
-					openBracketCount -= line.count("]")
+				if openBracketCount > 0 or line[0] == '[':
+					openBracketCount += line.count('[')
+					openBracketCount -= line.count(']')
 					skip = True
-				if inComment or line.find("<!--") != -1:
-					if line.find("-->") != -1:
+				if inComment or line.find('<!--') != -1:
+					if line.find('-->') != -1:
 						if inComment:
 							inComment = False
 						skip = True
@@ -61,64 +61,64 @@ def parseDesc(text):
 			if skip:
 				skip = False
 				continue
-			if line[-1] == ":": # Seems to help avoid disambiguation pages
+			if line[-1] == ':': # Seems to help avoid disambiguation pages
 				return None
-			if descLineRegex.match(line) != None:
+			if descLineRegex.match(line) is not None:
 				lines.append(line)
 		else:
-			if len(line) == 0:
-				return removeMarkup(" ".join(lines))
+			if not line:
+				return removeMarkup(' '.join(lines))
 			lines.append(line)
-	if len(lines) > 0:
-		return removeMarkup(" ".join(lines))
+	if lines:
+		return removeMarkup(' '.join(lines))
 	return None

-def removeMarkup(content):
-	content = embeddedHtmlRegex.sub("", content)
+def removeMarkup(content: str) -> str:
+	content = embeddedHtmlRegex.sub('', content)
 	content = convertTemplateRegex.sub(convertTemplateReplace, content)
 	content = mwparserfromhell.parse(content).strip_code() # Remove wikitext markup
-	content = parensGroupRegex.sub("", content)
-	content = leftoverBraceRegex.sub("", content)
+	content = parensGroupRegex.sub('', content)
+	content = leftoverBraceRegex.sub('', content)
 	return content

-def convertTitle(title):
-	return html.unescape(title).replace("_", " ")
+def convertTitle(title: str) -> str:
+	return html.unescape(title).replace('_', ' ')

-print("Creating database")
+print('Creating database')
 if os.path.exists(enwikiDb):
-	raise Exception(f"ERROR: Existing {enwikiDb}")
+	raise Exception(f'ERROR: Existing {enwikiDb}')
 dbCon = sqlite3.connect(enwikiDb)
 dbCur = dbCon.cursor()
-dbCur.execute("CREATE TABLE pages (id INT PRIMARY KEY, title TEXT UNIQUE)")
-dbCur.execute("CREATE INDEX pages_title_idx ON pages(title COLLATE NOCASE)")
-dbCur.execute("CREATE TABLE redirects (id INT PRIMARY KEY, target TEXT)")
-dbCur.execute("CREATE INDEX redirects_idx ON redirects(target)")
-dbCur.execute("CREATE TABLE descs (id INT PRIMARY KEY, desc TEXT)")
+dbCur.execute('CREATE TABLE pages (id INT PRIMARY KEY, title TEXT UNIQUE)')
+dbCur.execute('CREATE INDEX pages_title_idx ON pages(title COLLATE NOCASE)')
+dbCur.execute('CREATE TABLE redirects (id INT PRIMARY KEY, target TEXT)')
+dbCur.execute('CREATE INDEX redirects_idx ON redirects(target)')
+dbCur.execute('CREATE TABLE descs (id INT PRIMARY KEY, desc TEXT)')

-print("Iterating through dump file")
+print('Iterating through dump file')
 with bz2.open(dumpFile, mode='rt') as file:
 	dump = mwxml.Dump.from_file(file)
 	pageNum = 0
 	for page in dump:
 		pageNum += 1
 		if pageNum % 1e4 == 0:
-			print(f"At page {pageNum}")
+			print(f'At page {pageNum}')
 			if pageNum > 3e4:
 				break
 		# Parse page
 		if page.namespace == 0:
 			try:
-				dbCur.execute("INSERT INTO pages VALUES (?, ?)", (page.id, convertTitle(page.title)))
+				dbCur.execute('INSERT INTO pages VALUES (?, ?)', (page.id, convertTitle(page.title)))
 			except sqlite3.IntegrityError as e:
 				# Accounts for certain pages that have the same title
-				print(f"Failed to add page with title \"{page.title}\": {e}", file=sys.stderr)
+				print(f'Failed to add page with title "{page.title}": {e}', file=sys.stderr)
 				continue
-			if page.redirect != None:
-				dbCur.execute("INSERT INTO redirects VALUES (?, ?)", (page.id, convertTitle(page.redirect)))
+			if page.redirect is not None:
+				dbCur.execute('INSERT INTO redirects VALUES (?, ?)', (page.id, convertTitle(page.redirect)))
 			else:
 				revision = next(page)
 				desc = parseDesc(revision.text)
-				if desc != None:
-					dbCur.execute("INSERT INTO descs VALUES (?, ?)", (page.id, desc))
+				if desc is not None:
+					dbCur.execute('INSERT INTO descs VALUES (?, ?)', (page.id, desc))

-print("Closing database")
+print('Closing database')
 dbCon.commit()
 dbCon.close()
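As the hunks above show, parseDesc() hands its accumulated lines to removeMarkup(), which wraps mwparserfromhell's strip_code() in regex passes. A self-contained sketch of that pipeline on a made-up wikitext sample; only the convert-template pass is reproduced here:

import re
import mwparserfromhell

convertTemplateRegex = re.compile(r'{{convert\|(\d[^|]*)\|(?:(to|-)\|(\d[^|]*)\|)?([a-z][^|}]*)[^}]*}}')

def convertTemplateReplace(match: re.Match) -> str:
	# '{{convert|64|cm|in}}' -> '64 cm'; '{{convert|1|to|2|m}}' -> '1 to 2 m'
	if match.group(2) is None:
		return f'{match.group(1)} {match.group(4)}'
	return f'{match.group(1)} {match.group(2)} {match.group(3)} {match.group(4)}'

text = "The '''kakapo''' is a [[parrot]] up to {{convert|64|cm|in}} long."
text = convertTemplateRegex.sub(convertTemplateReplace, text)
print(mwparserfromhell.parse(text).strip_code())
# -> The kakapo is a parrot up to 64 cm long.

Templates are rewritten first because strip_code() would otherwise drop the {{convert}} construct, losing the measurement entirely.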
-print("Iterating through dump file") +print('Iterating through dump file') with bz2.open(dumpFile, mode='rt') as file: dump = mwxml.Dump.from_file(file) pageNum = 0 for page in dump: pageNum += 1 if pageNum % 1e4 == 0: - print(f"At page {pageNum}") + print(f'At page {pageNum}') if pageNum > 3e4: break # Parse page if page.namespace == 0: try: - dbCur.execute("INSERT INTO pages VALUES (?, ?)", (page.id, convertTitle(page.title))) + dbCur.execute('INSERT INTO pages VALUES (?, ?)', (page.id, convertTitle(page.title))) except sqlite3.IntegrityError as e: # Accounts for certain pages that have the same title - print(f"Failed to add page with title \"{page.title}\": {e}", file=sys.stderr) + print(f'Failed to add page with title "{page.title}": {e}', file=sys.stderr) continue - if page.redirect != None: - dbCur.execute("INSERT INTO redirects VALUES (?, ?)", (page.id, convertTitle(page.redirect))) + if page.redirect is not None: + dbCur.execute('INSERT INTO redirects VALUES (?, ?)', (page.id, convertTitle(page.redirect))) else: revision = next(page) desc = parseDesc(revision.text) - if desc != None: - dbCur.execute("INSERT INTO descs VALUES (?, ?)", (page.id, desc)) + if desc is not None: + dbCur.execute('INSERT INTO descs VALUES (?, ?)', (page.id, desc)) -print("Closing database") +print('Closing database') dbCon.commit() dbCon.close() diff --git a/backend/tolData/enwiki/genDumpIndexDb.py b/backend/tolData/enwiki/genDumpIndexDb.py index 1bffb27..3bd129f 100755 --- a/backend/tolData/enwiki/genDumpIndexDb.py +++ b/backend/tolData/enwiki/genDumpIndexDb.py @@ -10,46 +10,47 @@ Adds data from the wiki dump index-file into a database """, formatter_class=argparse.RawDescriptionHelpFormatter) parser.parse_args() -indexFile = "enwiki-20220501-pages-articles-multistream-index.txt.bz2" # Had about 22e6 lines -indexDb = "dumpIndex.db" +indexFile = 'enwiki-20220501-pages-articles-multistream-index.txt.bz2' # Had about 22e6 lines +indexDb = 'dumpIndex.db' if os.path.exists(indexDb): - raise Exception(f"ERROR: Existing {indexDb}") -print("Creating database") + raise Exception(f'ERROR: Existing {indexDb}') +print('Creating database') dbCon = sqlite3.connect(indexDb) dbCur = dbCon.cursor() -dbCur.execute("CREATE TABLE offsets (title TEXT PRIMARY KEY, id INT UNIQUE, offset INT, next_offset INT)") +dbCur.execute('CREATE TABLE offsets (title TEXT PRIMARY KEY, id INT UNIQUE, offset INT, next_offset INT)') -print("Iterating through index file") -lineRegex = re.compile(r"([^:]+):([^:]+):(.*)") +print('Iterating through index file') +lineRegex = re.compile(r'([^:]+):([^:]+):(.*)') lastOffset = 0 lineNum = 0 -entriesToAdd = [] +entriesToAdd: list[tuple[str, str]] = [] with bz2.open(indexFile, mode='rt') as file: for line in file: lineNum += 1 if lineNum % 1e5 == 0: - print(f"At line {lineNum}") + print(f'At line {lineNum}') # match = lineRegex.fullmatch(line.rstrip()) - (offset, pageId, title) = match.group(1,2,3) - offset = int(offset) + assert match is not None + offsetStr, pageId, title = match.group(1,2,3) + offset = int(offsetStr) if offset > lastOffset: - for (t, p) in entriesToAdd: + for t, p in entriesToAdd: try: - dbCur.execute("INSERT INTO offsets VALUES (?, ?, ?, ?)", (t, p, lastOffset, offset)) + dbCur.execute('INSERT INTO offsets VALUES (?, ?, ?, ?)', (t, int(p), lastOffset, offset)) except sqlite3.IntegrityError as e: # Accounts for certain entries in the file that have the same title - print(f"Failed on title \"{t}\": {e}", file=sys.stderr) + print(f'Failed on title "{t}": {e}', file=sys.stderr) 
diff --git a/backend/tolData/enwiki/genImgData.py b/backend/tolData/enwiki/genImgData.py
index b5d546d..00140f6 100755
--- a/backend/tolData/enwiki/genImgData.py
+++ b/backend/tolData/enwiki/genImgData.py
@@ -1,6 +1,6 @@
 #!/usr/bin/python3

-import sys, re
+import re
 import bz2, html, urllib.parse
 import sqlite3

@@ -15,117 +15,117 @@ will skip already-processed page IDs.
 parser.parse_args()

 def getInputPageIds():
-	pageIds = set()
-	dbCon = sqlite3.connect("../data.db")
+	pageIds: set[int] = set()
+	dbCon = sqlite3.connect('../data.db')
 	dbCur = dbCon.cursor()
-	for (pageId,) in dbCur.execute("SELECT id from wiki_ids"):
+	for (pageId,) in dbCur.execute('SELECT id from wiki_ids'):
 		pageIds.add(pageId)
 	dbCon.close()
 	return pageIds

-dumpFile = "enwiki-20220501-pages-articles-multistream.xml.bz2"
-indexDb = "dumpIndex.db"
-imgDb = "imgData.db" # The database to create
-idLineRegex = re.compile(r"<id>(.*)</id>")
-imageLineRegex = re.compile(r".*\| *image *= *([^|]*)")
-bracketImageRegex = re.compile(r"\[\[(File:[^|]*).*]]")
-imageNameRegex = re.compile(r".*\.(jpg|jpeg|png|gif|tiff|tif)", flags=re.IGNORECASE)
-cssImgCropRegex = re.compile(r"{{css image crop\|image *= *(.*)", flags=re.IGNORECASE)
+dumpFile = 'enwiki-20220501-pages-articles-multistream.xml.bz2'
+indexDb = 'dumpIndex.db'
+imgDb = 'imgData.db' # The database to create
+idLineRegex = re.compile(r'<id>(.*)</id>')
+imageLineRegex = re.compile(r'.*\| *image *= *([^|]*)')
+bracketImageRegex = re.compile(r'\[\[(File:[^|]*).*]]')
+imageNameRegex = re.compile(r'.*\.(jpg|jpeg|png|gif|tiff|tif)', flags=re.IGNORECASE)
+cssImgCropRegex = re.compile(r'{{css image crop\|image *= *(.*)', flags=re.IGNORECASE)

-print("Getting input page-ids")
+print('Getting input page-ids')
 pageIds = getInputPageIds()
-print(f"Found {len(pageIds)}")
+print(f'Found {len(pageIds)}')

-print("Opening databases")
+print('Opening databases')
 indexDbCon = sqlite3.connect(indexDb)
 indexDbCur = indexDbCon.cursor()
 imgDbCon = sqlite3.connect(imgDb)
 imgDbCur = imgDbCon.cursor()
-print("Checking tables")
-if imgDbCur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='page_imgs'").fetchone() == None:
+print('Checking tables')
+if imgDbCur.execute('SELECT name FROM sqlite_master WHERE type="table" AND name="page_imgs"').fetchone() is None:
 	# Create tables if not present
-	imgDbCur.execute("CREATE TABLE page_imgs (page_id INT PRIMARY KEY, img_name TEXT)") # img_name may be NULL
-	imgDbCur.execute("CREATE INDEX page_imgs_idx ON page_imgs(img_name)")
+	imgDbCur.execute('CREATE TABLE page_imgs (page_id INT PRIMARY KEY, img_name TEXT)') # img_name may be NULL
+	imgDbCur.execute('CREATE INDEX page_imgs_idx ON page_imgs(img_name)')
 else:
 	# Check for already-processed page IDs
 	numSkipped = 0
-	for (pid,) in imgDbCur.execute("SELECT page_id FROM page_imgs"):
+	for (pid,) in imgDbCur.execute('SELECT page_id FROM page_imgs'):
 		if pid in pageIds:
 			pageIds.remove(pid)
 			numSkipped += 1
 		else:
-			print(f"WARNING: Found already-processed page ID {pid} which was not in input set")
-	print(f"Will skip {numSkipped} already-processed page IDs")
+			print(f'WARNING: Found already-processed page ID {pid} which was not in input set')
+	print(f'Will skip {numSkipped} already-processed page IDs')

-print("Getting dump-file offsets")
-offsetToPageids = {}
-offsetToEnd = {} # Maps chunk-start offsets to their chunk-end offsets
+print('Getting dump-file offsets')
+offsetToPageids: dict[int, list[int]] = {}
+offsetToEnd: dict[int, int] = {} # Maps chunk-start offsets to their chunk-end offsets
 iterNum = 0
 for pageId in pageIds:
 	iterNum += 1
 	if iterNum % 1e4 == 0:
-		print(f"At iteration {iterNum}")
+		print(f'At iteration {iterNum}')
 	#
-	query = "SELECT offset, next_offset FROM offsets WHERE id = ?"
-	row = indexDbCur.execute(query, (pageId,)).fetchone()
-	if row == None:
-		print(f"WARNING: Page ID {pageId} not found")
+	query = 'SELECT offset, next_offset FROM offsets WHERE id = ?'
+	row: tuple[int, int] | None = indexDbCur.execute(query, (pageId,)).fetchone()
+	if row is None:
+		print(f'WARNING: Page ID {pageId} not found')
 		continue
-	(chunkOffset, endOffset) = row
+	chunkOffset, endOffset = row
 	offsetToEnd[chunkOffset] = endOffset
 	if chunkOffset not in offsetToPageids:
 		offsetToPageids[chunkOffset] = []
 	offsetToPageids[chunkOffset].append(pageId)
-print(f"Found {len(offsetToEnd)} chunks to check")
+print(f'Found {len(offsetToEnd)} chunks to check')

-print("Iterating through chunks in dump file")
-def getImageName(content):
-	" Given an array of text-content lines, tries to return an infoxbox image name, or None "
+print('Iterating through chunks in dump file')
+def getImageName(content: list[str]) -> str | None:
+	""" Given an array of text-content lines, tries to return an infobox image name, or None """
 	# Doesn't try and find images in outside-infobox [[File:...]] and <imagemap> sections
 	for line in content:
 		match = imageLineRegex.match(line)
-		if match != None:
+		if match is not None:
 			imageName = match.group(1).strip()
-			if imageName == "":
+			if imageName == '':
 				return None
 			imageName = html.unescape(imageName)
 			# Account for {{...
-			if imageName.startswith("{"):
+			if imageName.startswith('{'):
 				match = cssImgCropRegex.match(imageName)
-				if match == None:
+				if match is None:
 					return None
 				imageName = match.group(1)
 			# Account for [[File:...|...]]
-			if imageName.startswith("["):
+			if imageName.startswith('['):
 				match = bracketImageRegex.match(imageName)
-				if match == None:
+				if match is None:
 					return None
 				imageName = match.group(1)
 			# Account for <!--
-			if imageName.find("<!--") != -1:
+			if imageName.find('<!--') != -1:
 				return None
 			# Remove an initial 'File:'
-			if imageName.startswith("File:"):
+			if imageName.startswith('File:'):
 				imageName = imageName[5:]
 			# Remove an initial 'Image:'
-			if imageName.startswith("Image:"):
+			if imageName.startswith('Image:'):
 				imageName = imageName[6:]
 			# Check for extension
 			match = imageNameRegex.match(imageName)
-			if match != None:
+			if match is not None:
 				imageName = match.group(0)
 				imageName = urllib.parse.unquote(imageName)
 				imageName = html.unescape(imageName) # Intentionally unescaping again (handles some odd cases)
-				imageName = imageName.replace("_", " ")
+				imageName = imageName.replace('_', ' ')
 				return imageName
 			# Exclude lines like: | image = <imagemap>
 			return None
 	return None

 with open(dumpFile, mode='rb') as file:
 	iterNum = 0
-	for (pageOffset, endOffset) in offsetToEnd.items():
+	for pageOffset, endOffset in offsetToEnd.items():
 		iterNum += 1
 		if iterNum % 100 == 0:
-			print(f"At iteration {iterNum}")
+			print(f'At iteration {iterNum}')
 		#
 		pageIds = offsetToPageids[pageOffset]
 		# Jump to chunk
@@ -137,14 +137,14 @@ with open(dumpFile, mode='rb') as file:
 		lineIdx = 0
 		while lineIdx < len(lines):
 			# Look for <page>
-			if lines[lineIdx].lstrip() != "<page>":
+			if lines[lineIdx].lstrip() != '<page>':
 				lineIdx += 1
 				continue
 			# Check page id
 			lineIdx += 3
 			idLine = lines[lineIdx].lstrip()
 			match = idLineRegex.fullmatch(idLine)
-			if match == None or int(match.group(1)) not in pageIds:
+			if match is None or int(match.group(1)) not in pageIds:
 				lineIdx += 1
 				continue
 			pageId = int(match.group(1))
@@ -152,35 +152,35 @@ with open(dumpFile, mode='rb') as file:
 			# Look for <text> in <page>
 			foundText = False
 			while lineIdx < len(lines):
-				if not lines[lineIdx].lstrip().startswith("<text "):
+				if not lines[lineIdx].lstrip().startswith('<text '):
 					lineIdx += 1
 					continue
 				foundText = True
 				# Get text content
-				content = []
+				content: list[str] = []
 				line = lines[lineIdx]
-				content.append(line[line.find(">") + 1:])
+				content.append(line[line.find('>') + 1:])
 				lineIdx += 1
 				foundTextEnd = False
 				while lineIdx < len(lines):
 					line = lines[lineIdx]
-					if not line.endswith("</text>"):
+					if not line.endswith('</text>'):
 						content.append(line)
 						lineIdx += 1
 						continue
 					foundTextEnd = True
-					content.append(line[:line.rfind("</text>")])
+					content.append(line[:line.rfind('</text>')])
 					# Look for image-filename
 					imageName = getImageName(content)
-					imgDbCur.execute("INSERT into page_imgs VALUES (?, ?)", (pageId, imageName))
+					imgDbCur.execute('INSERT into page_imgs VALUES (?, ?)', (pageId, imageName))
 					break
 				if not foundTextEnd:
-					print(f"WARNING: Did not find </text> for page id {pageId}")
+					print(f'WARNING: Did not find </text> for page id {pageId}')
 				break
 			if not foundText:
-				print(f"WARNING: Did not find <text> for page id {pageId}")
+				print(f'WARNING: Did not find <text> for page id {pageId}')

-print("Closing databases")
+print('Closing databases')
 indexDbCon.close()
 imgDbCon.commit()
 imgDbCon.close()
diff --git a/backend/tolData/enwiki/genPageviewData.py b/backend/tolData/enwiki/genPageviewData.py
index f0901b2..6a5d79c 100755
--- a/backend/tolData/enwiki/genPageviewData.py
+++ b/backend/tolData/enwiki/genPageviewData.py
@@ -5,10 +5,10 @@ from collections import defaultdict
 import bz2, sqlite3
 import argparse

-parser = argparse.ArgumentParser(description='''
+parser = argparse.ArgumentParser(description="""
 Reads through wikimedia files containing pageview counts, computes average counts,
 and adds them to a database
-''', formatter_class=argparse.RawDescriptionHelpFormatter)
+""", formatter_class=argparse.RawDescriptionHelpFormatter)
 args = parser.parse_args()

 pageviewFiles = glob.glob('./pageviews/pageviews-*-user.bz2')
@@ -26,7 +26,7 @@ if os.path.exists(dbFile):
 	# platform (eg: mobile-web), monthly view count,
 	# hourly count string (eg: A1B2 means 1 view on day 1 and 2 views on day 2)
 namespaceRegex = re.compile(r'[a-zA-Z]+:')
-titleToViews = defaultdict(int)
+titleToViews: dict[str, int] = defaultdict(int)
 linePrefix = b'en.wikipedia '
 for filename in pageviewFiles:
 	print(f'Reading from {filename}')
@@ -40,7 +40,7 @@ for filename in pageviewFiles:
 			line = line[len(linePrefix):line.rfind(b' ')] # Remove first and last fields
 			title = line[:line.find(b' ')].decode('utf-8')
 			viewCount = int(line[line.rfind(b' ')+1:])
-			if namespaceRegex.match(title) != None:
+			if namespaceRegex.match(title) is not None:
 				continue
 			# Update map
 			titleToViews[title] += viewCount
@@ -54,7 +54,7 @@ idbCur = idbCon.cursor()
 dbCur.execute('CREATE TABLE views (title TEXT PRIMARY KEY, id INT, views INT)')
 for title, views in titleToViews.items():
 	row = idbCur.execute('SELECT id FROM offsets WHERE title = ?', (title,)).fetchone()
-	if row != None:
+	if row is not None:
 		wikiId = int(row[0])
 		dbCur.execute('INSERT INTO views VALUES (?, ?, ?)', (title, wikiId, math.floor(views / len(pageviewFiles))))
 dbCon.commit()
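For reference, each pageview record is a space-separated line whose first field names the wiki and whose last field is the hourly-count string; the script trims both ends and keeps the title and the monthly total between them. A parse demo on a made-up line (the exact field layout here is an assumption, not quoted from the dump documentation):

linePrefix = b'en.wikipedia '
line = b'en.wikipedia Kakapo 12345 desktop 4321 A1B2\n'

line = line.strip()
if line.startswith(linePrefix):
	line = line[len(linePrefix):line.rfind(b' ')]  # Remove first and last fields
	title = line[:line.find(b' ')].decode('utf-8')
	viewCount = int(line[line.rfind(b' ') + 1:])
	print(title, viewCount)  # -> Kakapo 4321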
diff --git a/backend/tolData/enwiki/lookupPage.py b/backend/tolData/enwiki/lookupPage.py
index e7b95f0..427aa7a 100755
--- a/backend/tolData/enwiki/lookupPage.py
+++ b/backend/tolData/enwiki/lookupPage.py
@@ -1,6 +1,6 @@
 #!/usr/bin/python3

-import sys, re
+import sys
 import bz2
 import sqlite3

@@ -12,24 +12,24 @@ db, and prints the corresponding <page>.
 parser.add_argument("title", help="The title to look up")
 args = parser.parse_args()

-dumpFile = "enwiki-20220501-pages-articles-multistream.xml.bz2"
-indexDb = "dumpIndex.db"
-pageTitle = args.title.replace("_", " ")
+dumpFile = 'enwiki-20220501-pages-articles-multistream.xml.bz2'
+indexDb = 'dumpIndex.db'
+pageTitle = args.title.replace('_', ' ')

-print("Looking up offset in index db")
+print('Looking up offset in index db')
 dbCon = sqlite3.connect(indexDb)
 dbCur = dbCon.cursor()
-query = "SELECT title, offset, next_offset FROM offsets WHERE title = ?"
+query = 'SELECT title, offset, next_offset FROM offsets WHERE title = ?'
 row = dbCur.execute(query, (pageTitle,)).fetchone()
-if row == None:
-	print("Title not found")
+if row is None:
+	print('Title not found')
 	sys.exit(0)
 _, pageOffset, endOffset = row
 dbCon.close()
-print(f"Found chunk at offset {pageOffset}")
+print(f'Found chunk at offset {pageOffset}')

-print("Reading from wiki dump")
-content = []
+print('Reading from wiki dump')
+content: list[str] = []
 with open(dumpFile, mode='rb') as file:
 	# Get uncompressed chunk
 	file.seek(pageOffset)
@@ -42,25 +42,25 @@ with open(dumpFile, mode='rb') as file:
 	pageNum = 0
 	while not found:
 		line = lines[lineIdx]
-		if line.lstrip() == "<page>":
+		if line.lstrip() == '<page>':
 			pageNum += 1
 			if pageNum > 100:
-				print("ERROR: Did not find title after 100 pages")
+				print('ERROR: Did not find title after 100 pages')
 				break
 			lineIdx += 1
 			titleLine = lines[lineIdx]
 			if titleLine.lstrip() == '<title>' + pageTitle + '</title>':
 				found = True
-				print(f"Found title in chunk as page {pageNum}")
+				print(f'Found title in chunk as page {pageNum}')
 				content.append(line)
 				content.append(titleLine)
 				while True:
 					lineIdx += 1
 					line = lines[lineIdx]
 					content.append(line)
-					if line.lstrip() == "</page>":
+					if line.lstrip() == '</page>':
 						break
 		lineIdx += 1

-print("Content: ")
-print("\n".join(content))
+print('Content: ')
+print('\n'.join(content))
