Diffstat (limited to 'backend/tol_data/enwiki')
-rw-r--r--  backend/tol_data/enwiki/README.md                      22
-rwxr-xr-x  backend/tol_data/enwiki/download_img_license_info.py    3
-rwxr-xr-x  backend/tol_data/enwiki/download_imgs.py                1
-rwxr-xr-x  backend/tol_data/enwiki/gen_desc_data.py                4
-rwxr-xr-x  backend/tol_data/enwiki/gen_dump_index_db.py            2
-rwxr-xr-x  backend/tol_data/enwiki/gen_img_data.py                 2
-rwxr-xr-x  backend/tol_data/enwiki/gen_pageview_data.py            3
-rwxr-xr-x  backend/tol_data/enwiki/lookup_page.py                  2
8 files changed, 19 insertions, 20 deletions
diff --git a/backend/tol_data/enwiki/README.md b/backend/tol_data/enwiki/README.md
index ba1de33..6f27d7f 100644
--- a/backend/tol_data/enwiki/README.md
+++ b/backend/tol_data/enwiki/README.md
@@ -14,12 +14,12 @@ This directory holds files obtained/derived from [English Wikipedia](https://en.
# Dump-Index Files
- `gen_dump_index_db.py` <br>
Creates a database version of the enwiki-dump index file.
-- `dumpIndex.db` <br>
+- `dump_index.db` <br>
Generated by `gen_dump_index_db.py`. <br>
Tables: <br>
- `offsets`: `title TEXT PRIMARY KEY, id INT UNIQUE, offset INT, next_offset INT`
-# Description Database Files
+# Description Files
- `gen_desc_data.py` <br>
Reads through pages in the dump file, and adds short-description info to a database.
- `desc_data.db` <br>
@@ -29,20 +29,20 @@ This directory holds files obtained/derived from [English Wikipedia](https://en.
- `redirects`: `id INT PRIMARY KEY, target TEXT`
- `descs`: `id INT PRIMARY KEY, desc TEXT`
-# Image Database Files
+# Image Files
- `gen_img_data.py` <br>
- Used to find infobox image names for page IDs, storing them into a database.
-- `downloadImgLicenseInfo.py` <br>
- Used to download licensing metadata for image names, via wikipedia's online API, storing them into a database.
+ Used to find infobox image names for page IDs, and store them in a database.
+- `download_img_license_info.py` <br>
+ Used to download licensing metadata for image names, via Wikipedia's online API, and store it in a database.
- `img_data.db` <br>
- Used to hold metadata about infobox images for a set of pageIDs.
+ Used to hold metadata about infobox images for a set of page IDs.
Generated using `gen_img_data.py` and `download_img_license_info.py`. <br>
Tables: <br>
- `page_imgs`: `page_id INT PRIMARY KEY, img_name TEXT` <br>
- `img_name` may be null, which means 'none found', and is used to avoid re-processing page-ids.
+ `img_name` may be null, which means 'none found', and is used to avoid re-processing page IDs.
- `imgs`: `name TEXT PRIMARY KEY, license TEXT, artist TEXT, credit TEXT, restrictions TEXT, url TEXT` <br>
Might lack some matches for `img_name` in `page_imgs`, where licensing info was unavailable.
-- `downloadImgs.py` <br>
+- `download_imgs.py` <br>
Used to download image files into imgs/.
# Page View Files
@@ -51,7 +51,7 @@ This directory holds files obtained/derived from [English Wikipedia](https://en.
Obtained via <https://dumps.wikimedia.org/other/pageview_complete/monthly/>.
Some format info was available from <https://dumps.wikimedia.org/other/pageview_complete/readme.html>.
- `gen_pageview_data.py` <br>
- Reads pageview/*, and creates a database holding average monthly pageview counts.
+ Reads pageview/* and `dump_index.db`, and creates a database holding average monthly pageview counts.
- `pageview_data.db` <br>
Generated using `gen_pageview_data.py`. <br>
Tables: <br>
@@ -60,4 +60,4 @@ This directory holds files obtained/derived from [English Wikipedia](https://en.
# Other Files
- `lookup_page.py` <br>
Running `lookup_page.py title1` looks in the dump for a page with a given title,
- and prints the contents to stdout. Uses dumpIndex.db.
+ and prints the contents to stdout. Uses dump_index.db.
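For reference, the `desc_data.db` layout documented above can be queried directly with sqlite3. A minimal sketch, using only the `redirects` and `descs` tables listed in the README (the page ID is an arbitrary example):

```python
import sqlite3

dbCon = sqlite3.connect('desc_data.db')
pageId = 12345  # arbitrary example page ID
row = dbCon.execute('SELECT desc FROM descs WHERE id = ?', (pageId,)).fetchone()
if row is not None:
    print(row[0])
else:
    # The page may instead be recorded as a redirect to another title
    row = dbCon.execute('SELECT target FROM redirects WHERE id = ?', (pageId,)).fetchone()
    print(f'Redirects to {row[0]}' if row is not None else 'No description found')
dbCon.close()
```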
diff --git a/backend/tol_data/enwiki/download_img_license_info.py b/backend/tol_data/enwiki/download_img_license_info.py
index 0a809ac..17e15b4 100755
--- a/backend/tol_data/enwiki/download_img_license_info.py
+++ b/backend/tol_data/enwiki/download_img_license_info.py
@@ -89,7 +89,7 @@ def downloadInfo(imgDb: str) -> None:
continue
# Parse response-object
if 'query' not in responseObj or 'pages' not in responseObj['query']:
- print('WARNING: Response object for doesn\'t have page data')
+ print('WARNING: Response object doesn\'t have page data')
print('\tImage batch: ' + '|'.join(imgBatch))
if 'error' in responseObj:
errorCode = responseObj['error']['code']
@@ -138,7 +138,6 @@ def downloadInfo(imgDb: str) -> None:
credit = html.unescape(credit)
credit = urllib.parse.unquote(credit)
# Add to db
- print((title, license, artist, credit, restrictions, url))
dbCur.execute('INSERT INTO imgs VALUES (?, ?, ?, ?, ?, ?)',
(title, license, artist, credit, restrictions, url))
#
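For context on the `query`/`pages` check in this hunk: licensing metadata like this is normally obtained from the MediaWiki API's `imageinfo` property with `extmetadata`. A hedged sketch of such a batched request (the script's exact parameters aren't shown in the diff, and the batch contents here are hypothetical):

```python
import json, urllib.parse, urllib.request

imgBatch = ['Example cat.jpg', 'Example dog.jpg']  # hypothetical image names
params = {
    'action': 'query',
    'titles': '|'.join('File:' + name for name in imgBatch),
    'prop': 'imageinfo',
    'iiprop': 'extmetadata',
    'format': 'json',
}
url = 'https://en.wikipedia.org/w/api.php?' + urllib.parse.urlencode(params)
with urllib.request.urlopen(url) as response:
    responseObj = json.loads(response.read())
# The check from the hunk above: a well-formed response has query -> pages
if 'query' not in responseObj or 'pages' not in responseObj['query']:
    print('WARNING: Response object doesn\'t have page data')
```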
diff --git a/backend/tol_data/enwiki/download_imgs.py b/backend/tol_data/enwiki/download_imgs.py
index ba874e1..c6a1c21 100755
--- a/backend/tol_data/enwiki/download_imgs.py
+++ b/backend/tol_data/enwiki/download_imgs.py
@@ -75,7 +75,6 @@ def downloadImgs(imgDb: str, outDir: str, timeout: int) -> None:
print(f'WARNING: No filename extension found in URL {url}')
continue
outFile = os.path.join(outDir, f'{pageId}{extension}')
- print(outFile)
headers = {
'user-agent': USER_AGENT,
'accept-encoding': 'gzip',
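The headers dict in this hunk suggests an HTTP client like `requests`. A hedged sketch of the download step around the removed debug print, with placeholder values for everything not shown in the diff:

```python
import requests

USER_AGENT = 'example-bot/0.1 (contact@example.com)'  # placeholder value
url = 'https://upload.wikimedia.org/example.jpg'  # hypothetical image URL
outFile = 'imgs/12345.jpg'  # pageId + extension, as built above
headers = {'user-agent': USER_AGENT, 'accept-encoding': 'gzip'}
response = requests.get(url, headers=headers, timeout=60)
if response.ok:
    with open(outFile, 'wb') as file:
        file.write(response.content)
```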
diff --git a/backend/tol_data/enwiki/gen_desc_data.py b/backend/tol_data/enwiki/gen_desc_data.py
index 0dca16b..b3fde52 100755
--- a/backend/tol_data/enwiki/gen_desc_data.py
+++ b/backend/tol_data/enwiki/gen_desc_data.py
@@ -1,8 +1,8 @@
#!/usr/bin/python3
"""
-Reads through the wiki dump, and attempts to parse short-descriptions,
-and add them to a database
+Reads through the wiki dump, attempts to parse short-descriptions,
+and adds them to a database
"""
# In testing, this script took over 10 hours to run, and generated about 5GB
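Enwiki short descriptions typically come from the `{{Short description|...}}` template, so the parsing likely centers on matching it. A minimal sketch (the script's actual rules may cover more cases):

```python
import re

shortDescRegex = re.compile(r'\{\{[Ss]hort description\|([^|}]+)')
wikitext = '{{Short description|Small domesticated carnivorous mammal}} ...'
match = shortDescRegex.search(wikitext)
if match is not None:
    print(match.group(1).strip())  # Small domesticated carnivorous mammal
```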
diff --git a/backend/tol_data/enwiki/gen_dump_index_db.py b/backend/tol_data/enwiki/gen_dump_index_db.py
index 5f21c9b..5778680 100755
--- a/backend/tol_data/enwiki/gen_dump_index_db.py
+++ b/backend/tol_data/enwiki/gen_dump_index_db.py
@@ -8,7 +8,7 @@ import bz2
import sqlite3
INDEX_FILE = 'enwiki-20220501-pages-articles-multistream-index.txt.bz2' # Had about 22e6 lines
-DB_FILE = 'dumpIndex.db'
+DB_FILE = 'dump_index.db'
def genData(indexFile: str, dbFile: str) -> None:
""" Reads the index file and creates the db """
diff --git a/backend/tol_data/enwiki/gen_img_data.py b/backend/tol_data/enwiki/gen_img_data.py
index d4696f0..040f223 100755
--- a/backend/tol_data/enwiki/gen_img_data.py
+++ b/backend/tol_data/enwiki/gen_img_data.py
@@ -13,7 +13,7 @@ import os, bz2, html, urllib.parse
import sqlite3
DUMP_FILE = 'enwiki-20220501-pages-articles-multistream.xml.bz2'
-INDEX_DB = 'dumpIndex.db'
+INDEX_DB = 'dump_index.db'
IMG_DB = 'img_data.db' # The database to create
DB_FILE = os.path.join('..', 'data.db')
#
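The diff doesn't show how infobox image names are located. A hedged sketch of one simple approach, matching an `image` field in a taxobox-style template (the real parsing is likely more involved):

```python
import re

imgFieldRegex = re.compile(r'^\s*\|\s*image\s*=\s*(\S.*?)\s*$', re.MULTILINE)
wikitext = '{{Speciesbox\n| genus = Felis\n| image = Felis catus.jpg\n}}'
match = imgFieldRegex.search(wikitext)
if match is not None:
    print(match.group(1))  # Felis catus.jpg
```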
diff --git a/backend/tol_data/enwiki/gen_pageview_data.py b/backend/tol_data/enwiki/gen_pageview_data.py
index ce3b674..8aee1cc 100755
--- a/backend/tol_data/enwiki/gen_pageview_data.py
+++ b/backend/tol_data/enwiki/gen_pageview_data.py
@@ -12,7 +12,7 @@ from collections import defaultdict
import bz2, sqlite3
PAGEVIEW_FILES = glob.glob('./pageviews/pageviews-*-user.bz2')
-DUMP_INDEX_DB = 'dumpIndex.db'
+DUMP_INDEX_DB = 'dump_index.db'
DB_FILE = 'pageview_data.db'
def genData(pageviewFiles: list[str], dumpIndexDb: str, dbFile: str) -> None:
@@ -42,6 +42,7 @@ def genData(pageviewFiles: list[str], dumpIndexDb: str, dbFile: str) -> None:
if namespaceRegex.match(title) is not None:
continue
# Update map
+ title = title.replace('_', ' ')
titleToViews[title] += viewCount
print(f'Found {len(titleToViews)} titles')
#
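The added `replace` call normalizes pageview titles, which use underscores, to the space-separated form stored in `dump_index.db`. A sketch of the per-line bookkeeping, with the field layout assumed from the pageview_complete readme and a hypothetical input line:

```python
from collections import defaultdict

titleToViews: defaultdict[str, int] = defaultdict(int)
# Assumed fields: wiki code, title, page ID, client type, view count, ...
line = 'en.wikipedia Felis_catus 53904 desktop 12345 ...'
fields = line.split(' ')
title, viewCount = fields[1], int(fields[4])
title = title.replace('_', ' ')  # match the title form in dump_index.db
titleToViews[title] += viewCount
```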
diff --git a/backend/tol_data/enwiki/lookup_page.py b/backend/tol_data/enwiki/lookup_page.py
index 8ef1229..f744818 100755
--- a/backend/tol_data/enwiki/lookup_page.py
+++ b/backend/tol_data/enwiki/lookup_page.py
@@ -10,7 +10,7 @@ import bz2
import sqlite3
DUMP_FILE = 'enwiki-20220501-pages-articles-multistream.xml.bz2'
-INDEX_DB = 'dumpIndex.db'
+INDEX_DB = 'dump_index.db'
def lookupPage(dumpFile: str, indexDb: str, pageTitle: str) -> None:
print('Looking up offset in index db')
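The multistream dump is what makes this lookup cheap: each stream is an independently-decompressible bz2 block, so the script can seek straight to a page's `offset` from `dump_index.db` and decompress only that block. A minimal sketch:

```python
import bz2, sqlite3

dbCon = sqlite3.connect('dump_index.db')
row = dbCon.execute(
    'SELECT offset, next_offset FROM offsets WHERE title = ?',
    ('Felis catus',)).fetchone()  # arbitrary example title
if row is not None:
    offset, nextOffset = row
    with open('enwiki-20220501-pages-articles-multistream.xml.bz2', 'rb') as file:
        file.seek(offset)
        data = file.read(nextOffset - offset)
    pagesXml = bz2.BZ2Decompressor().decompress(data).decode()
    print(pagesXml[:300])  # a stream holds up to ~100 <page> elements
dbCon.close()
```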