#!/usr/bin/env python
# coding: utf-8

# # Map Trove newspaper results by place of publication over time
#
# In [another notebook](Map-newspaper-results-by-place-of-publication.ipynb), I constructed a heatmap displaying the places of publication of articles returned by a search in Trove's newspapers zone.
#
# I suggested that it would be interesting to visualise changes over time. This notebook does just that by creating an animated heatmap.
#
# The key difference here is that instead of just getting and processing a single Trove API request, we'll need to fire off a series of API requests — one for each time interval.
#
# You can use this notebook to visualise your own search queries; just edit the search parameters where indicated.
#
# If you haven't used one of these notebooks before, they're basically web pages in which you can write, edit, and run live code. They're meant to encourage experimentation, so don't feel nervous. Just try running a few cells and see what happens!

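# Before diving in, it's worth seeing the data structure we're aiming to build. Folium's `HeatMapWithTime` plugin expects a list of frames (one per time interval), where each frame is a list of `[latitude, longitude]` points, plus a matching list of labels for the time slider. The cell below is a minimal, self-contained sketch using made-up values: two illustrative points, roughly at Sydney and Melbourne. In the real notebook, a place that matches ten articles in a given year will simply appear ten times in that year's frame.

# In[ ]:

# A minimal sketch of the data structure HeatMapWithTime expects.
# These coordinates are made-up illustrative values, not Trove data.
import folium
from folium.plugins import HeatMapWithTime
from IPython.display import display

demo_series = [
    [[-33.87, 151.21]],  # first frame: one point (roughly Sydney)
    [[-33.87, 151.21], [-37.81, 144.96]],  # second frame: add one near Melbourne
]
demo_index = [1901, 1902]  # one label per frame

demo_map = folium.Map(location=[-30, 135], zoom_start=4)
HeatMapWithTime(demo_series, index=demo_index).add_to(demo_map)
display(demo_map)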
# ## Setting things up
#
# First we'll import the packages we need.

# In[13]:

# Import the libraries we need
import os

import folium
import pandas as pd
import requests
from folium.plugins import HeatMapWithTime
from IPython.display import display
from tqdm.auto import tqdm

# In[14]:

get_ipython().run_cell_magic('capture', '', '# Load variables from the .env file if it exists\n# Use %%capture to suppress messages\n%load_ext dotenv\n%dotenv\n')

# You need an [API key](http://help.nla.gov.au/trove/building-with-trove/api) to get data from Trove. Insert your key below.

# In[15]:

# Insert your Trove API key
API_KEY = "YOUR API KEY"

# Use the api key value from an environment variable if it is available
# (e.g. a TROVE_API_KEY=... line in a .env file, loaded above)
if os.getenv("TROVE_API_KEY"):
    API_KEY = os.getenv("TROVE_API_KEY")

# Set up some default parameters for our API query.

# In[16]:

# Set up default parameters for our API query
params = {
    "zone": "newspaper",  # search the newspapers zone
    "encoding": "json",
    "facet": "title",  # get the number of results for each newspaper title
    "n": "1",  # we only need the facets, so don't request more than one record
    "key": API_KEY,
}

API_URL = "http://api.trove.nla.gov.au/v2/result"

# In[17]:


def format_facets(data):
    """
    Extract and normalise the facet data.
    """
    # Check to make sure we have results
    try:
        facets = data["response"]["zone"][0]["facets"]["facet"]["term"]
    except TypeError:
        # No results!
        raise
    else:
        # Convert to a DataFrame
        df = pd.DataFrame(facets)
        # Select the columns we want
        df = df[["display", "count"]]
        # Rename the columns
        df.columns = ["title_id", "total"]
        # Make sure the total column is a number
        df["total"] = pd.to_numeric(df["total"], errors="coerce")
    return df


def prepare_data(data):
    """
    Reformat the facet data, merge with locations, and then generate a list of locations.
    """
    # Check for results
    try:
        df = format_facets(data)
    except TypeError:
        # If there are no results just return an empty list
        hm_data = []
    else:
        # Merge facets data with the geolocated list of titles
        df_located = pd.merge(df, locations, on="title_id", how="left")
        # Group results by place, and calculate the total results for each
        df_totals = df_located.groupby(["place", "latitude", "longitude"]).sum(
            numeric_only=True
        )
        hm_data = []
        for place in df_totals.index:
            # Get the total number of articles for this place
            total = int(df_totals.loc[place]["total"])
            # Add the coordinates of the place to the list of locations
            # as many times as there are articles
            hm_data += [[place[1], place[2]]] * total
    return hm_data


# Get the geolocated titles data
locations = pd.read_csv(
    "data/trove-newspaper-titles-locations.csv", dtype={"title_id": "int64"}
)
# Only keep the first instance of each title
locations.drop_duplicates(subset=["title_id"], keep="first", inplace=True)

# ## Construct your search
#
# This is where you set your search keywords. Change the value of `params['q']` in the cell below to anything you might enter in the Trove simple search box. Don't include a date range, as we'll be handling that separately. For example:
#
# `params['q'] = 'weather AND wragge'`
#
# `params['q'] = '"Clement Wragge"'`
#
# `params['q'] = 'text:"White Australia Policy"'`
#
# You can also limit the results to specific categories. To only search for articles, include this line:
#
# `params['l-category'] = 'Article'`

# In[18]:

# Enter your search parameters
# This can be anything you'd enter in the Trove simple search box
params["q"] = 'text:"White Australia"'

# Remove the "#" symbol from the line below to limit the results to the article category
# params['l-category'] = 'Article'

# ## Set your date range
#
# In this example we'll use years as our time interval. We could easily change this to months, or even individual days, for a fine-grained analysis.
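# If you did want monthly intervals, the newspapers zone also has a month facet. The sketch below is a rough guide only: it assumes the `l-month` parameter takes the month number and is applied on top of `l-decade` and `l-year`, which you should check against the [Trove API documentation](http://help.nla.gov.au/trove/building-with-trove/api) before relying on it. For the rest of this notebook we'll stick with years.

# In[ ]:

# A sketch of a month-by-month loop over a shorter date range.
# Assumption: `l-month` takes the month number and must be combined
# with `l-decade` and `l-year` -- check the API docs before using this.
month_params = params.copy()  # use a copy so the yearly loop below isn't affected
hm_series_monthly = []
time_index_monthly = []
for year in range(1914, 1919):
    for month in range(1, 13):
        time_index_monthly.append(f"{year}-{month:02d}")
        month_params["l-decade"] = str(year)[:3]
        month_params["l-year"] = year
        month_params["l-month"] = month
        response = requests.get(API_URL, params=month_params)
        hm_series_monthly.append(prepare_data(response.json()))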
# In[19]:

start_year = 1880
end_year = 1950

# ## Get the data from Trove
#
# We need to make an API request for each year in our date range, so we'll construct a loop.
#
# The cell below generates two lists. The first, `hm_series`, is a list containing the data from each API request. The second, `time_index`, is a list of the years we're getting data for. Obviously these two lists should be the same length — one dataset for each year.

# In[ ]:

# <-- Click the run icon
hm_series = []
time_index = []
for year in tqdm(range(start_year, end_year + 1)):
    time_index.append(year)
    # In the newspapers zone the year facet only works if the decade facet
    # is also set, so derive the decade (e.g. 188 for the 1880s) from the year
    decade = str(year)[:3]
    params["l-decade"] = decade
    params["l-year"] = year
    response = requests.get(API_URL, params=params)
    data = response.json()
    # Convert the facet counts into a list of [latitude, longitude] points
    hm_data = prepare_data(data)
    hm_series.append(hm_data)

# ## Make an animated heatmap
#
# To create an animated heatmap we just need to feed `HeatMapWithTime` the `hm_series` data and the `time_index`.

# In[21]:

# <-- Click the run icon
# Create the map
m = folium.Map(location=[-30, 135], zoom_start=4)

# Add the heatmap data!
HeatMapWithTime(hm_series, index=time_index, auto_play=True).add_to(m)

# ### Search for "White Australia" from 1880 to 1950

# In[22]:

# <-- Click the run icon
display(m)

# ----
#
# Created by [Tim Sherratt](https://timsherratt.org/) for the [GLAM Workbench](https://glam-workbench.github.io/).
#
# Support this project by becoming a [GitHub sponsor](https://github.com/sponsors/wragge?o=esb).