from collections import defaultdict
from collections.abc import Iterable
import copy
from io import TextIOBase
from functools import lru_cache
import json
from typing import Any, Optional
import click
import datetime
import ftfy # type: ignore
import requests
from tqdm import tqdm
import trafilatura # type: ignore
from humanfriendly import format_number
# Download parameters
DOWNLOAD_TIMEOUT = 10 # Timeout in seconds of each HTTP request
DOWNLOAD_BUFFER_SIZE = 65_536 # Size of each HTTP request chunk in bytes
CODE_MAX_NUM_CHARS = 1_000_000 # Downloaded source code character threshold (applied after decoding to text)
# Extraction parameters
INCLUDE_COMMENTS = False # Whether to include web page user comments in extracted text
INCLUDE_TABLES = True # Whether to include markdown for HTML tables in extracted text
INCLUDE_LINKS = True # Whether to include markdown for HTML links in extracted text
TEXT_MIN_NUM_WORDS = 100 # Extracted text word (whitespace-delimited token) threshold; None to disable
TEXT_MIN_NUM_CHARS = None # Extracted text character threshold; None to disable
class DownloadTooLarge(Exception):
    pass
def download_url(url: str) -> tuple[Optional[str], dict[str, Any]]:
    """
    Fetch content at URL, returning downloaded source code and metadata.

    Metadata is represented as a dictionary containing citation keys and values.
    """
    source_code: Optional[str] = None
    content_type: Optional[str] = None
    num_chars: Optional[int] = None
    error: Optional[str] = None
    download_date = datetime.datetime.now().astimezone().isoformat()
    try:
        with requests.get(url, stream=True, timeout=DOWNLOAD_TIMEOUT) as response:
            content_type = response.headers.get('Content-Type')
            try:
                content = ''
                for chunk in response.iter_content(chunk_size=DOWNLOAD_BUFFER_SIZE, decode_unicode=True):
                    content += chunk
                    if len(content) > CODE_MAX_NUM_CHARS:
                        raise DownloadTooLarge()
                source_code = content
                num_chars = len(content)
            except DownloadTooLarge:
                source_code = None
                error = 'Download is too large'
    except Exception as ex:
        source_code = None
        error = f'{type(ex).__name__}: {ex}'
    if not source_code and error is None:
        source_code = None
        error = 'Download is empty'
    metadata = dict(
        source_code_content_type=content_type,
        source_code_num_chars=num_chars,
        source_download_date=download_date,
        source_download_error=error,
    )
    return (source_code, metadata)
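# Illustrative usage (the URL and values below are hypothetical; the key names match the metadata
# dict constructed above):
#
#     source_code, metadata = download_url('https://example.com/article')
#     metadata == {
#         'source_code_content_type': 'text/html; charset=utf-8',
#         'source_code_num_chars': 53412,
#         'source_download_date': '2024-01-01T00:00:00+00:00',
#         'source_download_error': None,
#     }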
def count_words(text: str) -> int:
    return len(text.split())


def has_sufficient_word_count(source_text: str) -> bool:
    return not TEXT_MIN_NUM_WORDS or count_words(source_text) > TEXT_MIN_NUM_WORDS


def has_sufficient_char_count(source_text: str) -> bool:
    return not TEXT_MIN_NUM_CHARS or len(source_text) > TEXT_MIN_NUM_CHARS
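# With the defaults above (TEXT_MIN_NUM_WORDS = 100, TEXT_MIN_NUM_CHARS = None), only the word-count
# filter is active: has_sufficient_word_count('one two three') is False, while
# has_sufficient_char_count('one two three') is True because a falsy threshold disables that check.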
def extract_source_text(source_code: str) -> dict[str, Any]:
    """
    Given a source's HTML, extract and return textual content and error information
    as a dictionary containing citation keys and values.
    """
    text: Optional[str] = None
    error: Optional[str] = None
    try:
        text = trafilatura.extract(
            source_code,
            include_comments=INCLUDE_COMMENTS,
            include_tables=INCLUDE_TABLES,
            include_links=INCLUDE_LINKS,
        )
        if text is not None:
            text = ftfy.fix_text(text)
    except Exception as ex:
        error = f'{type(ex).__name__}: {ex}'
        text = None
    else:
        if not text:
            error = 'Text is empty'
            text = None
        elif not has_sufficient_word_count(text):
            error = f'Text is too short ({format_number(count_words(text))} words)'
            text = None
        elif not has_sufficient_char_count(text):
            error = f'Text is too short ({format_number(len(text))} characters)'
            text = None
    return dict(
        source_text=text,
        source_extract_error=error,
    )
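# Illustrative outcomes (good_html and sparse_html are placeholder variables, and the word count
# is hypothetical):
#
#     extract_source_text(good_html)   -> {'source_text': '<extracted text>', 'source_extract_error': None}
#     extract_source_text(sparse_html) -> {'source_text': None, 'source_extract_error': 'Text is too short (42 words)'}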
@lru_cache
def scrape_source_fields(url: str) -> dict[str, Any]:
    """
    Download source code at URL and extract text from the downloaded source code,
    returning a dictionary of fields to add to the corresponding citation.

    This abstraction is a little awkward, but it facilitates caching.
    """
    download_fields: dict[str, Any] = {}
    extraction_fields: dict[str, Any] = {}
    if url:
        (source_code, download_fields) = download_url(url)
        if source_code:
            extraction_fields = extract_source_text(source_code)
    return download_fields | extraction_fields
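# Note: because of @lru_cache, a URL that is cited repeatedly is downloaded and extracted at most
# once while it remains in the (default-size) cache; later citations of the same URL reuse the
# cached fields rather than re-fetching the page.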
def scrape_source(citation: dict[str, Any]) -> dict[str, Any]:
    """
    Scrape the source for a citation, storing scraped content in the citation *in-place.*
    """
    if citation['url']:
        citation['dehydrated_citation'] = copy.deepcopy(citation)
        citation.update(scrape_source_fields(citation['url']))
    return citation
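# Illustrative usage (the citation values are hypothetical; 'url', 'content', and 'char_index' are
# the citation keys this script relies on):
#
#     citation = {'content': 'Example source', 'url': 'https://example.com/paper', 'char_index': 17}
#     scrape_source(citation)
#     # citation now also contains 'dehydrated_citation' (a deep copy taken before scraping) plus
#     # the download/extraction fields such as 'source_text' and 'source_download_error'.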
def update_citations_in_excerpts_with_citations(
    excerpts_with_citations: list[dict[str, Any]],
    new_citations: Iterable[dict[str, Any]],
) -> list[dict[str, Any]]:
    """
    Replace citations in `excerpts_with_citations` with the corresponding citations from `new_citations`,
    EXCEPT for the `char_index` field (leaving it as-is).

    Citations are aligned/matched by their `content` field.
    If a citation content string appears multiple times in `new_citations`, matching citations in
    `excerpts_with_citations` will be replaced with the final match in `new_citations`.

    `excerpts_with_citations` is modified *in-place,* and the updated list is returned.
    """
    ewc_citation_indices_by_key: dict[str, set[tuple[int, int]]] = defaultdict(set)
    for (i, ewc) in enumerate(excerpts_with_citations):
        for (j, citation) in enumerate(ewc['citations']):
            key = citation['content']
            ewc_citation_indices_by_key[key].add((i, j))
    for new_citation in new_citations:
        key = new_citation['content']
        for (i, j) in ewc_citation_indices_by_key[key]:
            excerpts_with_citations[i]['citations'][j] |= {
                k: v
                for (k, v) in new_citation.items()
                if k != 'char_index'
            }
    return excerpts_with_citations  # was modified in-place
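# For example, if an excerpt's citation and a scraped citation share the content 'Example source',
# every field of the scraped citation except 'char_index' is merged into the excerpt's citation by
# the dict |= update above; the excerpt keeps its own 'char_index'.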
def scrape_article_sources(article: dict[str, Any]) -> dict[str, Any]:
    """
    Scrape sources for all web citations in article, storing scraped content in article citation
    objects *in-place.* Return modified article for convenience.
    """
    all_citations = []
    for element in article['elements']:
        if element['type'] == 'paragraph':
            for sentence in element['sentences']:
                for citation in sentence['citations']:
                    scrape_source(citation)  # modifies in-place
                    all_citations.append(citation)
        elif element['type'] == 'heading':
            for citation in element['citations']:
                scrape_source(citation)  # modifies in-place
                all_citations.append(citation)
    # modifies in-place
    update_citations_in_excerpts_with_citations(article['excerpts_with_citations'], all_citations)
    return article
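# Article structure assumed by the loops above: article['elements'] is a list of dicts whose 'type'
# is either 'paragraph' (with 'sentences', each holding 'citations') or 'heading' (with 'citations'
# directly); the article also carries a top-level 'excerpts_with_citations' list.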
@click.command(context_settings={'show_default': True})
@click.argument('input_file', type=click.File())
@click.argument('output_file', type=click.File(mode='w'))
@click.option('--stream', '-s', is_flag=True,
              help='Read one article at a time, rehydrating it and writing the output article before reading the next'
                   ' article. The default behavior is to read all articles into memory before processing them.')
def main(input_file: TextIOBase, output_file: TextIOBase, stream: bool) -> None:
    """
    Scrape sources for all MegaWika 2 articles in the JSON-lines (one JSON-encoded article per line) file INPUT_FILE,
    updating each web citation with source-scraping content and metadata and writing the articles with source content
    to the JSON-lines file OUTPUT_FILE.

    Each web citation will be updated with `source_text` *and* source-scraping metadata fields; the original
    (dehydrated, metadata-only) web citation will be stored under the new citation field `dehydrated_citation`.
    """
    articles: Iterable[dict[str, Any]] = (json.loads(line) for line in input_file)
    if not stream:
        click.echo('Reading all articles into memory')
        articles = list(articles)
    for article in tqdm(articles, desc='Rehydrating', unit='article'):
        scrape_article_sources(article)
        output_file.write(json.dumps(article) + '\n')
    click.echo('Done')


if __name__ == '__main__':
    main()
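# Example invocation (the script and file names are hypothetical):
#
#     python scrape_sources.py articles.jsonl articles_rehydrated.jsonl --stream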