From 94a8ad9b067e5a2c442ce47ce72d1a53eb444160 Mon Sep 17 00:00:00 2001 From: Terry Truong Date: Mon, 23 Jan 2023 18:00:43 +1100 Subject: Clean up some docs and naming inconsistencies --- backend/tol_data/enwiki/README.md | 22 +++++++++++----------- .../tol_data/enwiki/download_img_license_info.py | 3 +-- backend/tol_data/enwiki/download_imgs.py | 1 - backend/tol_data/enwiki/gen_desc_data.py | 4 ++-- backend/tol_data/enwiki/gen_dump_index_db.py | 2 +- backend/tol_data/enwiki/gen_img_data.py | 2 +- backend/tol_data/enwiki/gen_pageview_data.py | 3 ++- backend/tol_data/enwiki/lookup_page.py | 2 +- 8 files changed, 19 insertions(+), 20 deletions(-) (limited to 'backend/tol_data/enwiki') diff --git a/backend/tol_data/enwiki/README.md b/backend/tol_data/enwiki/README.md index ba1de33..6f27d7f 100644 --- a/backend/tol_data/enwiki/README.md +++ b/backend/tol_data/enwiki/README.md @@ -14,12 +14,12 @@ This directory holds files obtained/derived from [English Wikipedia](https://en. # Dump-Index Files - `gen_dump_index_db.py`
Creates a database version of the enwiki-dump index file. -- `dumpIndex.db`
+- `dump_index.db`
Generated by `gen_dump_index_db.py`.
Tables:
- `offsets`: `title TEXT PRIMARY KEY, id INT UNIQUE, offset INT, next_offset INT` -# Description Database Files +# Description Files - `gen_desc_data.py`
Reads through pages in the dump file, and adds short-description info to a database. - `desc_data.db`
@@ -29,20 +29,20 @@ This directory holds files obtained/derived from [English Wikipedia](https://en. - `redirects`: `id INT PRIMARY KEY, target TEXT` - `descs`: `id INT PRIMARY KEY, desc TEXT` -# Image Database Files +# Image Files - `gen_img_data.py`
- Used to find infobox image names for page IDs, storing them into a database. -- `downloadImgLicenseInfo.py`
- Used to download licensing metadata for image names, via wikipedia's online API, storing them into a database. + Used to find infobox image names for page IDs, and store them into a database. +- `download_img_license_info.py`
+ Used to download licensing metadata for image names, via wikipedia's online API, and store them into a database. - `img_data.db`
- Used to hold metadata about infobox images for a set of pageIDs. + Used to hold metadata about infobox images for a set of page IDs. Generated using `gen_img_data.py` and `download_img_license_info.py`.
Tables:
- `page_imgs`: `page_id INT PRIMARY KEY, img_name TEXT`
- `img_name` may be null, which means 'none found', and is used to avoid re-processing page-ids. + `img_name` may be null, which means 'none found', and is used to avoid re-processing page IDs. - `imgs`: `name TEXT PRIMARY KEY, license TEXT, artist TEXT, credit TEXT, restrictions TEXT, url TEXT`
Might lack some matches for `img_name` in `page_imgs`, due to licensing info unavailability. -- `downloadImgs.py`
+- `download_imgs.py`
Used to download image files into imgs/. # Page View Files @@ -51,7 +51,7 @@ This directory holds files obtained/derived from [English Wikipedia](https://en. Obtained via . Some format info was available from . - `gen_pageview_data.py`
- Reads pageview/*, and creates a database holding average monthly pageview counts. + Reads pageview/* and `dump_index.db`, and creates a database holding average monthly pageview counts. - `pageview_data.db`
Generated using `gen_pageview_data.py`.
Tables:
@@ -60,4 +60,4 @@ This directory holds files obtained/derived from [English Wikipedia](https://en. # Other Files - `lookup_page.py`
Running `lookup_page.py title1` looks in the dump for a page with a given title, - and prints the contents to stdout. Uses dumpIndex.db. + and prints the contents to stdout. Uses dump_index.db. diff --git a/backend/tol_data/enwiki/download_img_license_info.py b/backend/tol_data/enwiki/download_img_license_info.py index 0a809ac..17e15b4 100755 --- a/backend/tol_data/enwiki/download_img_license_info.py +++ b/backend/tol_data/enwiki/download_img_license_info.py @@ -89,7 +89,7 @@ def downloadInfo(imgDb: str) -> None: continue # Parse response-object if 'query' not in responseObj or 'pages' not in responseObj['query']: - print('WARNING: Response object for doesn\'t have page data') + print('WARNING: Response object doesn\'t have page data') print('\tImage batch: ' + '|'.join(imgBatch)) if 'error' in responseObj: errorCode = responseObj['error']['code'] @@ -138,7 +138,6 @@ def downloadInfo(imgDb: str) -> None: credit = html.unescape(credit) credit = urllib.parse.unquote(credit) # Add to db - print((title, license, artist, credit, restrictions, url)) dbCur.execute('INSERT INTO imgs VALUES (?, ?, ?, ?, ?, ?)', (title, license, artist, credit, restrictions, url)) # diff --git a/backend/tol_data/enwiki/download_imgs.py b/backend/tol_data/enwiki/download_imgs.py index ba874e1..c6a1c21 100755 --- a/backend/tol_data/enwiki/download_imgs.py +++ b/backend/tol_data/enwiki/download_imgs.py @@ -75,7 +75,6 @@ def downloadImgs(imgDb: str, outDir: str, timeout: int) -> None: print(f'WARNING: No filename extension found in URL {url}') continue outFile = os.path.join(outDir, f'{pageId}{extension}') - print(outFile) headers = { 'user-agent': USER_AGENT, 'accept-encoding': 'gzip', diff --git a/backend/tol_data/enwiki/gen_desc_data.py b/backend/tol_data/enwiki/gen_desc_data.py index 0dca16b..b3fde52 100755 --- a/backend/tol_data/enwiki/gen_desc_data.py +++ b/backend/tol_data/enwiki/gen_desc_data.py @@ -1,8 +1,8 @@ #!/usr/bin/python3 """ -Reads through the wiki dump, and attempts to parse 
short-descriptions, -and add them to a database +Reads through the wiki dump, attempts to parse short-descriptions, +and adds them to a database """ # In testing, this script took over 10 hours to run, and generated about 5GB diff --git a/backend/tol_data/enwiki/gen_dump_index_db.py b/backend/tol_data/enwiki/gen_dump_index_db.py index 5f21c9b..5778680 100755 --- a/backend/tol_data/enwiki/gen_dump_index_db.py +++ b/backend/tol_data/enwiki/gen_dump_index_db.py @@ -8,7 +8,7 @@ import bz2 import sqlite3 INDEX_FILE = 'enwiki-20220501-pages-articles-multistream-index.txt.bz2' # Had about 22e6 lines -DB_FILE = 'dumpIndex.db' +DB_FILE = 'dump_index.db' def genData(indexFile: str, dbFile: str) -> None: """ Reads the index file and creates the db """ diff --git a/backend/tol_data/enwiki/gen_img_data.py b/backend/tol_data/enwiki/gen_img_data.py index d4696f0..040f223 100755 --- a/backend/tol_data/enwiki/gen_img_data.py +++ b/backend/tol_data/enwiki/gen_img_data.py @@ -13,7 +13,7 @@ import os, bz2, html, urllib.parse import sqlite3 DUMP_FILE = 'enwiki-20220501-pages-articles-multistream.xml.bz2' -INDEX_DB = 'dumpIndex.db' +INDEX_DB = 'dump_index.db' IMG_DB = 'img_data.db' # The database to create DB_FILE = os.path.join('..', 'data.db') # diff --git a/backend/tol_data/enwiki/gen_pageview_data.py b/backend/tol_data/enwiki/gen_pageview_data.py index ce3b674..8aee1cc 100755 --- a/backend/tol_data/enwiki/gen_pageview_data.py +++ b/backend/tol_data/enwiki/gen_pageview_data.py @@ -12,7 +12,7 @@ from collections import defaultdict import bz2, sqlite3 PAGEVIEW_FILES = glob.glob('./pageviews/pageviews-*-user.bz2') -DUMP_INDEX_DB = 'dumpIndex.db' +DUMP_INDEX_DB = 'dump_index.db' DB_FILE = 'pageview_data.db' def genData(pageviewFiles: list[str], dumpIndexDb: str, dbFile: str) -> None: @@ -42,6 +42,7 @@ def genData(pageviewFiles: list[str], dumpIndexDb: str, dbFile: str) -> None: if namespaceRegex.match(title) is not None: continue # Update map + title = title.replace('_', ' ') 
titleToViews[title] += viewCount print(f'Found {len(titleToViews)} titles') # diff --git a/backend/tol_data/enwiki/lookup_page.py b/backend/tol_data/enwiki/lookup_page.py index 8ef1229..f744818 100755 --- a/backend/tol_data/enwiki/lookup_page.py +++ b/backend/tol_data/enwiki/lookup_page.py @@ -10,7 +10,7 @@ import bz2 import sqlite3 DUMP_FILE = 'enwiki-20220501-pages-articles-multistream.xml.bz2' -INDEX_DB = 'dumpIndex.db' +INDEX_DB = 'dump_index.db' def lookupPage(dumpFile: str, indexDb: str, pageTitle: str) -> None: print('Looking up offset in index db') -- cgit v1.2.3