aboutsummaryrefslogtreecommitdiff
path: root/backend/data/enwiki
diff options
context:
space:
mode:
Diffstat (limited to 'backend/data/enwiki')
-rw-r--r--backend/data/enwiki/README.md73
-rwxr-xr-xbackend/data/enwiki/downloadEnwikiImgs.py2
-rwxr-xr-xbackend/data/enwiki/downloadImgLicenseInfo.py2
-rwxr-xr-xbackend/data/enwiki/genDescData.py (renamed from backend/data/enwiki/genData.py)2
-rwxr-xr-xbackend/data/enwiki/genImgData.py (renamed from backend/data/enwiki/getEnwikiImgData.py)2
5 files changed, 47 insertions, 34 deletions
diff --git a/backend/data/enwiki/README.md b/backend/data/enwiki/README.md
index 6462d7d..1c16a2e 100644
--- a/backend/data/enwiki/README.md
+++ b/backend/data/enwiki/README.md
@@ -1,39 +1,52 @@
-Downloaded Files
-================
+This directory holds files obtained from/using [English Wikipedia](https://en.wikipedia.org/wiki/Main_Page).
+
+# Downloaded Files
- enwiki-20220501-pages-articles-multistream.xml.bz2 <br>
- Obtained via <https://dumps.wikimedia.org/backup-index.html>
- (site suggests downloading from a mirror). Contains text
- content and metadata for pages in English Wikipedia
- (current revision only, excludes talk pages). Some file
- content and format information was available from
- <https://meta.wikimedia.org/wiki/Data_dumps/What%27s_available_for_download>.
+ Obtained via <https://dumps.wikimedia.org/backup-index.html> (site suggests downloading from a mirror).
+ Contains text content and metadata for pages in enwiki.
+ Some file content and format information was available from
+ <https://meta.wikimedia.org/wiki/Data_dumps/What%27s_available_for_download>.
- enwiki-20220501-pages-articles-multistream-index.txt.bz2 <br>
Obtained like above. Holds lines of the form offset1:pageId1:title1,
- providing offsets, for each page, into the dump file, of a chunk of
+ providing, for each page, an offset into the dump file of a chunk of
100 pages that includes it.
-Generated Files
-===============
+# Generated Dump-Index Files
+- genDumpIndexDb.py <br>
+ Creates an sqlite-database version of the enwiki-dump index file.
- dumpIndex.db <br>
- Holds data from the enwiki dump index file. Generated by
- genDumpIndexDb.py, and used by lookupPage.py to get content for a
- given page title. <br>
+ Generated by genDumpIndexDb.py. <br>
Tables: <br>
- - offsets: title TEXT PRIMARY KEY, id INT UNIQUE, offset INT, next\_offset INT
-- enwikiData.db <br>
- Holds data obtained from the enwiki dump file, in 'pages',
- 'redirects', and 'descs' tables. Generated by genData.py, which uses
- python packages mwxml and mwparserfromhell. <br>
+ - `offsets`: `title TEXT PRIMARY KEY, id INT UNIQUE, offset INT, next_offset INT`
+
+# Description Database Files
+- genDescData.py <br>
+ Reads through pages in the dump file, and adds short-description info to a database.
+- descData.db <br>
+ Generated by genDescData.py. <br>
Tables: <br>
- - pages: id INT PRIMARY KEY, title TEXT UNIQUE
- - redirects: id INT PRIMARY KEY, target TEXT
- - descs: id INT PRIMARY KEY, desc TEXT
-- enwikiImgs.db <br>
- Holds infobox-images obtained for some set of wiki page-ids.
- Generated by running getEnwikiImgData.py, which uses the enwiki dump
- file and dumpIndex.db. <br>
+ - `pages`: `id INT PRIMARY KEY, title TEXT UNIQUE`
+ - `redirects`: `id INT PRIMARY KEY, target TEXT`
+ - `descs`: `id INT PRIMARY KEY, desc TEXT`
+
+# Image Database Files
+- genImgData.py <br>
+ Used to find infobox image names for page IDs, storing them into a database.
+- downloadImgLicenseInfo.py <br>
+ Used to download licensing metadata for image names, via Wikipedia's online API, storing them into a database.
+- imgData.db <br>
+ Used to hold metadata about infobox images for a set of pageIDs.
+ Generated using genImgData.py and downloadImgLicenseInfo.py. <br>
Tables: <br>
- - page\_imgs: page\_id INT PRIMAY KEY, img\_name TEXT
- (img\_name may be null, which is used to avoid re-processing the page-id on a second pass)
- - imgs: name TEXT PRIMARY KEY, license TEXT, artist TEXT, credit TEXT, restrictions TEXT, url TEXT
- (might lack some matches for 'img_name' in 'page_imgs', due to inability to get license info)
+ - `page_imgs`: `page_id INT PRIMARY KEY, img_name TEXT` <br>
+ `img_name` may be null, which means 'none found', and is used to avoid re-processing page-ids.
+ - `imgs`: `name TEXT PRIMARY KEY, license TEXT, artist TEXT, credit TEXT, restrictions TEXT, url TEXT` <br>
+ Might lack some matches for `img_name` in `page_imgs`, due to licensing info unavailability.
+- downloadEnwikiImgs.py <br>
+ Used to download image files into imgs/.
+
+# Other Files
+- lookupPage.py <br>
+ Running `lookupPage.py title1` looks in the dump for a page with a given title,
+ and prints the contents to stdout. Uses dumpIndex.db.
+
diff --git a/backend/data/enwiki/downloadEnwikiImgs.py b/backend/data/enwiki/downloadEnwikiImgs.py
index de9b862..2929a0d 100755
--- a/backend/data/enwiki/downloadEnwikiImgs.py
+++ b/backend/data/enwiki/downloadEnwikiImgs.py
@@ -16,7 +16,7 @@ if len(sys.argv) > 1:
print(usageInfo, file=sys.stderr)
sys.exit(1)
-imgDb = "enwikiImgs.db" # About 130k image names
+imgDb = "imgData.db" # About 130k image names
outDir = "imgs"
licenseRegex = re.compile(r"cc0|cc([ -]by)?([ -]sa)?([ -][1234]\.[05])?( \w\w\w?)?", flags=re.IGNORECASE)
diff --git a/backend/data/enwiki/downloadImgLicenseInfo.py b/backend/data/enwiki/downloadImgLicenseInfo.py
index 8231fbb..097304b 100755
--- a/backend/data/enwiki/downloadImgLicenseInfo.py
+++ b/backend/data/enwiki/downloadImgLicenseInfo.py
@@ -16,7 +16,7 @@ if len(sys.argv) > 1:
print(usageInfo, file=sys.stderr)
sys.exit(1)
-imgDb = "enwikiImgs.db" # About 130k image names
+imgDb = "imgData.db" # About 130k image names
apiUrl = "https://en.wikipedia.org/w/api.php"
batchSz = 50 # Max 50
tagRegex = re.compile(r"<[^<]+>")
diff --git a/backend/data/enwiki/genData.py b/backend/data/enwiki/genDescData.py
index 3e60bb5..032dbed 100755
--- a/backend/data/enwiki/genData.py
+++ b/backend/data/enwiki/genDescData.py
@@ -13,7 +13,7 @@ if len(sys.argv) > 1:
sys.exit(1)
dumpFile = "enwiki-20220501-pages-articles-multistream.xml.bz2" # 22,034,540 pages
-enwikiDb = "enwikiData.db"
+enwikiDb = "descData.db"
# Some regexps and functions for parsing wikitext
descLineRegex = re.compile("^ *[A-Z'\"]")
diff --git a/backend/data/enwiki/getEnwikiImgData.py b/backend/data/enwiki/genImgData.py
index f8bb2ee..9bd28f4 100755
--- a/backend/data/enwiki/getEnwikiImgData.py
+++ b/backend/data/enwiki/genImgData.py
@@ -21,7 +21,7 @@ def getInputPageIds():
return pageIds
dumpFile = "enwiki-20220501-pages-articles-multistream.xml.bz2"
indexDb = "dumpIndex.db"
-imgDb = "enwikiImgs.db" # Output db
+imgDb = "imgData.db" # Output db
idLineRegex = re.compile(r"<id>(.*)</id>")
imageLineRegex = re.compile(r".*\| *image *= *([^|]*)")
bracketImageRegex = re.compile(r"\[\[(File:[^|]*).*]]")