# This notebook is designed to run in Voila as an app (with the code hidden).
# To launch this notebook in Voila, just select 'View > Open with Voila in New Browser Tab'
# Your browser might ask for permission to open the new tab as a popup.
This notebook demonstrates a number of different ways of comparing versions of archived web pages. Just choose a repository, enter a url, and select two dates to see comparisons based on:

- page metadata
- basic statistics, such as file size and number of words
- numbers of internal and external links
- cosine similarity of text
- line-by-line differences in text or html
- screenshots
import base64
import io
import math
import os
import re
import time
from difflib import HtmlDiff
from pathlib import Path
from urllib.parse import parse_qs, quote, urlparse
import arrow
import geckodriver_autoinstaller
import ipywidgets as widgets
import jsons
import pandas as pd
import PIL
import requests
import selenium
import trafilatura as tf
from bs4 import BeautifulSoup
from IPython.display import HTML, display
from PIL import Image
from selenium import webdriver
from selenium.webdriver.common.by import By
from sklearn.feature_extraction.text import TfidfVectorizer
from slugify import slugify
geckodriver_autoinstaller.install()
# Add styles for the diff
HTML(
"<style>.diff_add {background-color: #d0e9c6;}.diff_sub {background-color: #ebcccc;} table.diff, table.diff thead {border: 1px solid black;} table.diff {table-layout: fixed; width: 100%;} th.diff_next, td.diff_next {width: 4%;} table.diff th.diff_header {text-align: left;} td {word-wrap: break-word;}</style>"
)
def is_memento(url):
"""
Is this url a Memento? Checks for the presence of a timestamp.
"""
return bool(re.search(r"/(\d{12}|\d{14})(?:id_|mp_|if_)*/http", url))
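# A quick illustration (hypothetical urls, nothing is fetched):
# is_memento("https://web.archive.org/web/20190101000000id_/http://example.com/")  # True
# is_memento("http://example.com/")  # False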
def get_timestamp(url):
"""
Extract the timestamp from a Memento
"""
return re.search(r"/(\d{12}|\d{14})(?:if_|mp_|id_)*/", url).group(1)
def get_dates(page_data):
"""
Return formatted dates of the saved pages.
"""
dates = []
for capture in page_data:
dates.append(format_date_from_timestamp(capture["url"]))
return dates
def get_html(url):
"""
Retrieve the original HTML content of an archived page.
Follow redirects if they go to another archived page.
Return the (possibly redirected) url from the response and the HTML content.
"""
# Adding the id_ hint tells the archive to give us the original harvested version, without any rewriting.
url = re.sub(r"/(\d{12}|\d{14})(?:mp_)*/http", r"/\1id_/http", url)
response = requests.get(url, allow_redirects=True)
# Some captures might redirect themselves to live versions
# If the redirected url doesn't look like a Memento rerun this without redirection
if not is_memento(response.url):
response = requests.get(url, allow_redirects=False)
return {"url": response.url, "html": response.content}
def get_all_text(capture_data):
"""
Get all the human visible text from a web page, including headers, footers, and navigation.
Does some cleaning up to remove multiple spaces, tabs, and newlines.
"""
try:
text = BeautifulSoup(capture_data["html"], "html.parser").get_text()
except TypeError:
return None
else:
# Remove multiple newlines
text = re.sub(r"\n\s*\n", "\n\n", text)
# Remove multiple spaces or tabs with a single space
text = re.sub(r"( |\t){2,}", " ", text)
# Remove leading spaces
text = re.sub(r"\n ", "\n", text)
# Remove leading newlines
text = re.sub(r"^\n*", "", text)
return text
def get_main_text(capture_data):
"""
Get only the main text from a page, excluding boilerplate and navigation.
"""
text = tf.extract(capture_data["html"])
return text
def load_data(urls):
"""
Load all the content of the specified urls into the page_data list.
Add in the text and main text.
"""
global page_data
for url in urls:
# Get the HTML of the archived page
page_data.append(get_html(url))
for capture in page_data:
# Add the human-readable text
capture["text"] = get_all_text(capture)
# Add the main text
capture["main_text"] = get_main_text(capture)
# METADATA
def get_page_metadata(html):
"""
Get the metadata from a page extracted by Trafilatura
"""
return jsons.dump(tf.metadata.extract_metadata(html))
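# The dumped metadata is a plain dict; depending on the trafilatura version it
# typically includes fields such as title, author, date, sitename, and description.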
def get_metadata(page_data):
"""
Get metadata from all saved pages.
"""
metadata = []
for capture in page_data:
metadata.append(get_page_metadata(capture["html"]))
return metadata
def display_metadata(page_data):
"""
Display the extracted metadata.
"""
metadata = get_metadata(page_data)
# Get the formatted dates of the two pages
dates = get_dates(page_data)
# Use Pandas to make nice tables, using the dates as index
df = pd.DataFrame(metadata, index=dates)
with md_out:
display(HTML("<hr><h2>Metadata</h2>"))
display(df)
# STATISTICS
def size_in_bytes(html):
"""
The HTML should already be a bytes string, so len should give us the number of bytes.
"""
return len(html)
def number_of_words(text):
"""
Split text on whitespace and count resulting words.
(Might include some punctuation as well.)
"""
try:
words = len(text.split())
except AttributeError:
words = 0
return words
def get_summary_data(capture):
"""
Compile some summary statistics about a page.
"""
summary = {
"File size (bytes)": size_in_bytes(capture["html"]),
"Number of words (all text)": number_of_words(capture["text"]),
"Number of words (main text)": number_of_words(capture["main_text"]),
}
return summary
def get_summaries(page_data):
"""
Get summaries of all the saved pages.
"""
summaries = []
for capture in page_data:
summaries.append(get_summary_data(capture))
return summaries
def display_summaries(page_data):
"""
Display the summaries using Pandas.
"""
summaries = get_summaries(page_data)
dates = get_dates(page_data)
df = pd.DataFrame(summaries, index=dates)
with stats_out:
display(HTML("<hr><h2>Statistics</h2>"))
# Include thousands separator
display(df.head().style.format("{:,.0f}"))
# LINKS
def link_is_local(site, href):
"""
Check whether a link is internal or external by looking for the current domain in the href.
"""
# Relative urls will always be local of course
if href.startswith("http") and site not in href.lower():
return False
else:
return True
def get_site_from_url(url):
# Get the current domain from the url
site = re.search(
r"(\d{12}|\d{14})(?:id_)*/https*://(?:.*@){0,1}(.*?)(?:\:\d*){0,1}/", url
).group(2)
# Remove any wwws
site = re.sub(r"^www\d*\.", "", site)
return site
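# For example (hypothetical url):
# get_site_from_url("https://web.archive.org/web/20190101000000id_/http://www.example.com/page")
# returns 'example.com'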
def get_links_in_page(capture):
"""
Extract internal and external links from a html page.
"""
internal_links = []
external_links = []
site = get_site_from_url(capture["url"])
soup = BeautifulSoup(capture["html"], "html.parser")
links = soup.find_all("a")
for link in links:
try:
href = link["href"]
except KeyError:
pass
else:
if link_is_local(site, href):
if href not in internal_links:
internal_links.append(href)
else:
if href not in external_links:
external_links.append(href)
return {"internal": internal_links, "external": external_links}
def get_links(page_data):
"""
Extract link info from all saved pages.
"""
all_links = []
for capture in page_data:
links = get_links_in_page(capture)
all_links.append(links)
return all_links
def display_link_data(dates, all_links):
"""
Display the number of links in saved pages.
"""
totals = []
for links in all_links:
totals.append(
{
"Total internal links": len(links["internal"]),
"Total external links": len(links["external"]),
}
)
df = pd.DataFrame(totals, index=dates)
display(df)
def make_clickable(val):
"""
Make the value of a Pandas cell into a clickable link.
"""
return f'<a href="{val}">{val}</a>' if val is not None else ""
def list_external_links(dates, all_links):
"""
Display a list of external links using Pandas.
"""
# Put links into a dataframe, then transpose to make dates into columns
df = pd.DataFrame([link["external"] for link in all_links], index=dates).T
# Make links clickable and align left
df_styler = df.style.format(make_clickable).set_properties(**{"text-align": "left"})
# Make the headers left aligned as well
df_styler.set_table_styles([dict(selector="th", props=[("text-align", "left")])])
# Display without the index
display(df_styler.hide(axis="index"))
def display_links(page_data):
"""
Extract and display information about links in the saved pages.
"""
all_links = get_links(page_data)
dates = get_dates(page_data)
with links_out:
display(HTML("<hr><h2>Links</h2>"))
display_link_data(dates, all_links)
display(HTML("<h4>External links</h4>"))
list_external_links(dates, all_links)
# SIMILARITY
def calculate_similarity(text1, text2):
"""
Calculate cosine similarity of two texts.
"""
try:
tfidf = TfidfVectorizer(min_df=1).fit_transform([text1, text2])
except AttributeError:
return None
return (tfidf @ tfidf.T).toarray()[0, 1]
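# Sanity checks (illustrative): identical texts score 1.0, a missing text returns None.
# calculate_similarity("the same words", "the same words")  # ≈ 1.0
# calculate_similarity(None, "some text")  # None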
def calculate_similarities(page_data):
"""
Calculate cosine similarities for all the text, and the main text only, of the saved pages.
"""
similarities = {
"All text": calculate_similarity(page_data[0]["text"], page_data[1]["text"]),
"Main text": calculate_similarity(
page_data[0]["main_text"], page_data[1]["main_text"]
),
}
return similarities
def display_similarities(page_data):
"""
Display the similarity values.
"""
similarities = calculate_similarities(page_data)
df = pd.DataFrame([similarities], index=["Cosine similarity"]).T
with sim_out:
display(HTML("<hr><h2>Cosine similarity</h2>"))
display(df)
# DIFFERENCES
def process_text(capture, include="text"):
"""
Prepare extracted text for diffing, by splitting into lines, and removing any blank lines.
"""
if include == "text":
lines = [
line.strip()
for line in BeautifulSoup(capture["html"], "html.parser").get_text().splitlines()
if not re.match(r"^\s*$", line)
]
elif include == "main_text":
lines = capture["main_text"].splitlines()
else:
lines = [line.decode() for line in capture["html"].splitlines()]
return lines
def format_date_link(url):
date = format_date_from_timestamp(url)
return f'<a href="{url}">{date}</a>'
def show_line_differences(page_data, include="text", context=True, numlines=0):
"""
Use difflib to show a side-by-side comparison of the text in two web pages.
"""
differ = HtmlDiff()
doc1 = process_text(page_data[0], include=include)
doc2 = process_text(page_data[1], include=include)
date1 = format_date_link(page_data[0]["url"])
date2 = format_date_link(page_data[1]["url"])
html = differ.make_table(
doc1, doc2, context=context, numlines=numlines, fromdesc=date1, todesc=date2
)
# Rewrite the table html to make the column widths work better
html = html.replace(
r'<th colspan="2" class="diff_header"',
'<th class="diff_next"></th><th class="diff_header"',
)
# Cleaning up the table output
html = html.replace('nowrap="nowrap"', "")
html = html.replace("<tbody>", "").replace("</tbody>", "")
with diff_out:
display(HTML(html))
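# HtmlDiff.make_table comes from the standard library's difflib: with context=True it
# shows only changed lines plus 'numlines' lines of context; context=False shows the full text.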
def display_diff(e):
"""
Update the diff display when the dropdown selections change.
"""
diff_out.clear_output(wait=True)
with diff_out:
display(HTML("<hr><h2>Differences by line</h2>"))
display(widgets.HBox([which_text, what_context]))
show_line_differences(
page_data, include=which_text.value, context=what_context.value
)
which_text = widgets.Dropdown(
options=[
("All text", "text"),
("Main text", "main_text"),
("Complete html", "html"),
],
description="Compare:",
disabled=False,
)
what_context = widgets.Dropdown(
options=[("Just changes", True), ("Complete context", False)],
description="Context:",
disabled=False,
)
# Refresh the diff when either dropdown value changes
which_text.observe(display_diff, names="value")
what_context.observe(display_diff, names="value")
# SCREENSHOTS
wayback = ["ndhadeliver.natlib.govt.nz", "web.archive.org"]
pywb = {
"web.archive.org.au": "replayFrame",
"webarchive.nla.gov.au": "replayFrame",
"webarchive.org.uk": "replay_iframe",
"webarchive.nationalarchives.gov.uk" : "replay_iframe",
}
def get_full_page_screenshot(url, save_width=200):
"""
Gets a full page screenshot of the supplied url.
By default resizes the screenshot to a maximum width of 200px.
Provide a 'save_width' value to change this.
NOTE the webdriver sometimes fails for unknown reasons. Just try again.
"""
domain = urlparse(url)[1].replace("www.", "")
# NZ and IA inject content into the page, so we use if_ to get the original page (with rewritten urls)
if domain in wayback and "if_" not in url:
url = re.sub(r"/(\d{12}|\d{14})/http", r"/\1if_/http", url)
try:
date_str, site = re.search(
r"/(\d{12}|\d{14})(?:if_|mp_)*/https*://(.+/)", url
).groups()
except AttributeError:
# There's something wrong with the link...
show_error(f"{url} isn't a Memento – did you forget to select an archive?")
else:
output_dir = Path("screenshots")
output_dir.mkdir(parents=True, exist_ok=True)
ss_file = Path(output_dir, f"{slugify(site)}-{date_str}-{save_width}.png")
options = webdriver.FirefoxOptions()
options.add_argument("--headless")
driver = webdriver.Firefox(options=options)
driver.implicitly_wait(15)
driver.get(url)
# Give some time for everything to load
time.sleep(30)
driver.maximize_window()
# UK and AU use pywb in framed replay mode, so we need to switch to the framed content
if domain in pywb:
try:
driver.switch_to.frame(pywb[domain])
except selenium.common.exceptions.NoSuchFrameException:
# If we pass here we'll probably still get a ss, just not full page -- better than failing?
pass
ss = None
for tag in ["body", "html", "frameset"]:
try:
elem = driver.find_element(By.TAG_NAME, tag)
ss = elem.screenshot_as_base64
break
except (
selenium.common.exceptions.NoSuchElementException,
selenium.common.exceptions.WebDriverException,
):
pass
driver.quit()
if not ss:
show_error(f"Couldn't get a screenshot of {url} – sorry...")
else:
img = Image.open(io.BytesIO(base64.b64decode(ss)))
ratio = save_width / img.width
(width, height) = (save_width, math.ceil(img.height * ratio))
resized_img = img.resize((width, height), PIL.Image.Resampling.LANCZOS)
resized_img.save(ss_file)
return ss_file
status = widgets.Output()
def display_screenshots(urls):
html_output = []
with ss_out:
display(HTML("<hr><h2>Screenshots</h2>"))
display(status)
for url in urls:
with status:
print("Generating screenshot...")
try:
ss_file = get_full_page_screenshot(url, save_width=350)
if ss_file:
date = format_date_from_timestamp(url)
try:
display_url = re.search(
r"/(\d{12}|\d{14})(?:mp_|if_|id_)*/(.*)$", url
).group(2)
except AttributeError:
display_url = url
html_output.append(
f'<div style="float:left; margin-left: 20px;"><p><b>{date}</b><br><a href="{url.replace("if_/", "/")}">{display_url}</a></p><p><a href="{ss_file}"><img src="{ss_file}"></a><br><a href="{ss_file}">[Download]</a></p></div>'
)
status.clear_output()
ss_out.clear_output(wait=True)
with ss_out:
display(HTML("<hr><h2>Screenshots</h2>"))
display(status)
display((HTML("".join(html_output))))
except selenium.common.exceptions.WebDriverException:
show_error(f"couldn't get a screenshot of {url} – sorry...")
def show_error(message=None):
status.clear_output()
with status:
print(f"Something went wrong – {message}")
# USER INTERFACE
page_data = []
TIMEGATES = {
"nla": "https://web.archive.org.au/awa/",
"nlnz": "https://ndhadeliver.natlib.govt.nz/webarchive/wayback/",
"bl": "https://www.webarchive.org.uk/wayback/archive/",
"ia": "https://web.archive.org/web/",
"ukgwa": "https://webarchive.nationalarchives.gov.uk/ukgwa/"
}
def format_date_for_headers(iso_date, tz):
"""
Convert an ISO date (YYYY-MM-DD) to a datetime at noon in the specified timezone.
Convert the datetime to UTC and format as required by Accept-Datetime headers,
e.g. Fri, 23 Mar 2007 01:00:00 GMT
"""
local = arrow.get(f"{iso_date} 12:00:00 {tz}", "YYYY-MM-DD HH:mm:ss ZZZ")
gmt = local.to("utc")
return f'{gmt.format("ddd, DD MMM YYYY HH:mm:ss")} GMT'
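# For example: format_date_for_headers("2007-03-23", "Australia/Canberra")
# returns 'Fri, 23 Mar 2007 01:00:00 GMT' (noon in Canberra was UTC+11 under daylight saving)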
def format_date_from_timestamp(url):
timestamp = re.search(r"/(\d{12}|\d{14})(?:if_|mp_|id_)*/", url).group(1)
return arrow.get(timestamp, "YYYYMMDDHHmmss").format("D MMMM YYYY")
def parse_links_from_headers(response):
"""
Extract original, timegate, timemap, and memento links from 'Link' header.
"""
links = response.links
return {k: v["url"] for k, v in links.items()}
def query_timegate(timegate, url, date=None, tz="Australia/Canberra"):
"""
Query the specified repository for a Memento.
"""
headers = {}
if date:
formatted_date = format_date_for_headers(date, tz)
headers["Accept-Datetime"] = formatted_date
# BL,NLNZ & UKGWA don't seem to default to latest date if no date supplied
elif not date and timegate in ["bl", "nlnz", "ukgwa"]:
formatted_date = format_date_for_headers(
arrow.utcnow().format("YYYY-MM-DD"), tz
)
headers["Accept-Datetime"] = formatted_date
# Note that you don't get a timegate response if you leave off the trailing slash, but extras don't hurt!
tg_url = (
f"{TIMEGATES[timegate]}{url}/"
if not url.endswith("/")
else f"{TIMEGATES[timegate]}{url}"
)
# IA only works if redirects are followed -- this defaults to False with HEAD requests...
if timegate == "ia":
allow_redirects = True
else:
allow_redirects = False
response = requests.head(tg_url, headers=headers, allow_redirects=allow_redirects)
return parse_links_from_headers(response)
def get_memento(timegate, url, date=None, tz="Australia/Canberra"):
"""
If there's no memento in the results, look for an alternative.
"""
links = query_timegate(timegate, url, date, tz)
# NLNZ doesn't always seem to return a Memento, so we'll build in some fuzziness
if links:
if "memento" in links:
memento = links["memento"]
elif "prev memento" in links:
memento = links["prev memento"]
elif "next memento" in links:
memento = links["next memento"]
elif "last memento" in links:
memento = links["last memento"]
return memento
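# Usage sketch (illustrative values): returns the url of the capture closest to the
# requested date, or None if the timegate supplied no usable memento links.
# get_memento("ia", "http://discontents.com.au", "2010-02-09")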
def get_mementos():
mementos = [
get_memento(repository.value, target_url.value, first_date.value),
get_memento(repository.value, target_url.value, second_date.value),
]
return mementos
def share_this(urls):
binder_url = "https://mybinder.org/v2/gh/GLAM-Workbench/web-archives/master?urlpath=/voila/render/show_diffs.ipynb"
parameter_string = quote(f"?url1={urls[0]}&url2={urls[1]}")
share_url = f"{binder_url}{parameter_string}"
with share_out:
display(HTML(f'<p>Share this: <a href="{share_url}">{share_url}</a></p>'))
def clear(e):
global page_data
page_data = []
md_out.clear_output()
stats_out.clear_output()
links_out.clear_output()
sim_out.clear_output()
diff_out.clear_output()
ss_out.clear_output()
share_out.clear_output()
def start(e):
clear("e")
if url1 and url2:
urls = [url1, url2]
else:
urls = get_mementos()
load_data(urls)
display_metadata(page_data)
display_summaries(page_data)
display_links(page_data)
display_similarities(page_data)
display_diff("e")
display_screenshots(urls)
share_this(urls)
def display_mementos(url1, url2, start_button):
memento1 = widgets.Text(value=url1, layout=widgets.Layout(width="400px"))
memento2 = widgets.Text(value=url2, layout=widgets.Layout(width="400px"))
with options_out:
display(
widgets.HBox(
[
widgets.VBox(
[
widgets.Label("First memento:"),
widgets.Label("Second memento:"),
]
),
widgets.VBox([memento1, memento2, start_button]),
],
layout=widgets.Layout(padding="20px"),
)
)
options_out = widgets.Output()
md_out = widgets.Output()
stats_out = widgets.Output()
links_out = widgets.Output()
sim_out = widgets.Output()
diff_out = widgets.Output()
ss_out = widgets.Output()
share_out = widgets.Output()
start_button = widgets.Button(description="Start", button_style="primary")
start_button.on_click(start)
query_string = os.environ.get("QUERY_STRING", "")
parameters = parse_qs(query_string)
url1 = parameters.get("url1", [""])[0]
url2 = parameters.get("url2", [""])[0]
if url1 and url2:
display_mementos(url1, url2, start_button)
else:
repository = widgets.Dropdown(
options=[
("---", ""),
("UK Web Archive", "bl"),
("UK Government Web Archive", "ukgwa"),
("National Library of Australia", "nla"),
("National Library of New Zealand", "nlnz"),
("Internet Archive", "ia"),
],
description="Archive:",
disabled=False,
)
target_url = widgets.Text(description="Target URL:")
first_date = widgets.DatePicker(description="Date 1: ", disabled=False)
second_date = widgets.DatePicker(description="Date 2: ", disabled=False)
with options_out:
display(
widgets.HBox(
[
widgets.VBox([repository, first_date]),
widgets.VBox([target_url, second_date]),
],
layout=widgets.Layout(padding="20px"),
),
widgets.HBox([start_button]),
)
display(options_out, md_out, stats_out, links_out, sim_out, diff_out, ss_out, share_out)
%%capture
%load_ext dotenv
%dotenv
# Insert some values for automated testing
if os.getenv("GW_STATUS") == "dev":
options_out.clear_output()
url1 = "https://web.archive.org.au/awa/19981206012233mp_/http://www.discontents.com.au:80/"
url2 = (
"https://web.archive.org.au/awa/20100209041537mp_/http://discontents.com.au:80/"
)
display_mementos(url1, url2, start_button)
# If values have been provided via url or above, then start automatically.
# Note that Voila widgets don't load immediately, hence the polling to
# make sure the start button exists.
if url1 and url2:
script = """
<script type="text/javascript">
function start() {
let button = document.querySelector("button.mod-primary");
if (button) {
button.click();
} else {
setTimeout(start, 5);
}
}
start();
</script>"""
display(HTML(script))
Created by Tim Sherratt for the GLAM Workbench. Support me by becoming a GitHub sponsor!
Work on this notebook was supported by the IIPC Discretionary Funding Programme 2019-2020.
The Web Archives section of the GLAM Workbench is sponsored by the British Library.