#!/usr/bin/env python
# coding: utf-8

# # Harvesting articles that mention "Anzac Day" on Anzac Day
#
# The Trove Newspaper Harvester web app and command-line tool make it easy for you to harvest the results of a single search. But if you want to harvest very large or complex searches, you might find it easier to import the `trove_newspaper_harvester` library directly and take control of the harvesting process.
#
# For example, how would you harvest all of the newspaper articles mentioning "Anzac Day" that were published *on* Anzac Day, 25 April? It's possible to search for results from a single day using the `date` index. So, theoretically, you could combine multiple dates using `OR` and build a very long search query by doing something like this:
#
# ``` python
# days = []
# for year in range(1916, 1955):
#     days.append(f"date:[{year}-04-24T00:00:00Z TO {year}-04-25T00:00:00Z]")
# query_string = f'"anzac day" AND ({" OR ".join(days)})'
# ```
#
# However, if you try searching in Trove using the query string generated by this code it [returns no results](https://trove.nla.gov.au/search/category/newspapers?keyword=%22anzac%20day%22%20AND%20%28date%3A%5B1916-04-24T00%3A00%3A00Z%20TO%201916-04-25T00%3A00%3A00Z%5D%20OR%20date%3A%5B1917-04-24T00%3A00%3A00Z%20TO%201917-04-25T00%3A00%3A00Z%5D%20OR%20date%3A%5B1918-04-24T00%3A00%3A00Z%20TO%201918-04-25T00%3A00%3A00Z%5D%20OR%20date%3A%5B1919-04-24T00%3A00%3A00Z%20TO%201919-04-25T00%3A00%3A00Z%5D%20OR%20date%3A%5B1920-04-24T00%3A00%3A00Z%20TO%201920-04-25T00%3A00%3A00Z%5D%20OR%20date%3A%5B1921-04-24T00%3A00%3A00Z%20TO%201921-04-25T00%3A00%3A00Z%5D%20OR%20date%3A%5B1922-04-24T00%3A00%3A00Z%20TO%201922-04-25T00%3A00%3A00Z%5D%20OR%20date%3A%5B1923-04-24T00%3A00%3A00Z%20TO%201923-04-25T00%3A00%3A00Z%5D%20OR%20date%3A%5B1924-04-24T00%3A00%3A00Z%20TO%201924-04-25T00%3A00%3A00Z%5D%20OR%20date%3A%5B1925-04-24T00%3A00%3A00Z%20TO%201925-04-25T00%3A00%3A00Z%5D%20OR%20date%3A%5B1926-04-24T00%3A00%3A00Z%20TO%201926-04-25T00%3A00%3A00Z%5D%20OR%20date%3A%5B1927-04-24T00%3A00%3A00Z%20TO%201927-04-25T00%3A00%3A00Z%5D%20OR%20date%3A%5B1928-04-24T00%3A00%3A00Z%20TO%201928-04-25T00%3A00%3A00Z%5D%20OR%20date%3A%5B1929-04-24T00%3A00%3A00Z%20TO%201929-04-25T00%3A00%3A00Z%5D%20OR%20date%3A%5B1930-04-24T00%3A00%3A00Z%20TO%201930-04-25T00%3A00%3A00Z%5D%20OR%20date%3A%5B1931-04-24T00%3A00%3A00Z%20TO%201931-04-25T00%3A00%3A00Z%5D%20OR%20date%3A%5B1932-04-24T00%3A00%3A00Z%20TO%201932-04-25T00%3A00%3A00Z%5D%20OR%20date%3A%5B1933-04-24T00%3A00%3A00Z%20TO%201933-04-25T00%3A00%3A00Z%5D%20OR%20date%3A%5B1934-04-24T00%3A00%3A00Z%20TO%201934-04-25T00%3A00%3A00Z%5D%20OR%20date%3A%5B1935-04-24T00%3A00%3A00Z%20TO%201935-04-25T00%3A00%3A00Z%5D%20OR%20date%3A%5B1936-04-24T00%3A00%3A00Z%20TO%201936-04-25T00%3A00%3A00Z%5D%20OR%20date%3A%5B1937-04-24T00%3A00%3A00Z%20TO%201937-04-25T00%3A00%3A00Z%5D%20OR%20date%3A%5B1938-04-24T00%3A00%3A00Z%20TO%201938-04-25T00%3A00%3A00Z%5D%20OR%20date%3A%5B1939-04-24T00%3A00%3A00Z%20TO%201939-04-25T00%3A00%3A00Z%5D%20OR%20date%3A%5B1940-04-24T00%3A00%3A00Z%20TO%201940-04-25T00%3A00%3A00Z%5D%20OR%20date%3A%5B1941-04-24T00%3A00%3A00Z%20TO%201941-04-25T00%3A00%3A00Z%5D%20OR%20date%3A%5B1942-04-24T00%3A00%3A00Z%20TO%201942-04-25T00%3A00%3A00Z%5D%20OR%20date%3A%5B1943-04-24T00%3A00%3A00Z%20TO%201943-04-25T00%3A00%3A00Z%5D%20OR%20date%3A%5B1944-04-24T00%3A00%3A00Z%20TO%201944-04-25T00%3A00%3A00Z%5D%20OR%20date%3A%5B1945-04-24T00%3A00%3A00Z%20TO%201945-04-25T00%3A00%3A00Z%5D%20OR%20date%3A%5B1946-04-24T00%3A00%3A00Z%20TO%201946-04-25T00%3A00%3A00Z%5D%20OR%20date%3A%5B1947-04-24T00%3A00%3A00Z%20TO%201947-04-25T00%3A00%3A00Z%5D%20OR%20date%3A%5B1948-04-24T00%3A00%3A00Z%20TO%201948-04-25T00%3A00%3A00Z%5D%20OR%20date%3A%5B1949-04-24T00%3A00%3A00Z%20TO%201949-04-25T00%3A00%3A00Z%5D%20OR%20date%3A%5B1950-04-24T00%3A00%3A00Z%20TO%201950-04-25T00%3A00%3A00Z%5D%20OR%20date%3A%5B1951-04-24T00%3A00%3A00Z%20TO%201951-04-25T00%3A00%3A00Z%5D%20OR%20date%3A%5B1952-04-24T00%3A00%3A00Z%20TO%201952-04-25T00%3A00%3A00Z%5D%20OR%20date%3A%5B1953-04-24T00%3A00%3A00Z%20TO%201953-04-25T00%3A00%3A00Z%5D%20OR%20date%3A%5B1954-04-24T00%3A00%3A00Z%20TO%201954-04-25T00%3A00%3A00Z%5D%29). Presumably it has hit a limit on query length. But even if you reduce the span of years you can get some odd results. It seems safer to search for each day independently, but how can you do that without manually creating lots of separate harvests?
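#
# As a rough check on the query-length explanation, you can reuse the snippet above to see just how long the combined query string is. The sketch below is purely illustrative: it only builds and measures the string, which comes to over 2,000 characters even before URL encoding.
#
# ``` python
# # Build the combined 1916-1954 query and measure its length (illustrative only)
# days = []
# for year in range(1916, 1955):
#     days.append(f"date:[{year}-04-24T00:00:00Z TO {year}-04-25T00:00:00Z]")
# query_string = f'"anzac day" AND ({" OR ".join(days)})'
# print(len(query_string))
# ```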
#
# The example below does the following:
#
# - imports the `trove_newspaper_harvester` `Harvester` class and `prepare_query` function
# - uses `prepare_query` to create the basic set of parameters (without the date search)
# - loops through the desired span of years, adding the date search to the query, initialising the `Harvester`, running the harvest, and saving the results as a CSV file
#
# It also uses the `data_dir` and `harvest_dir` parameters of `Harvester` to tell it where to save the results. These options help you keep related searches together. In this instance, all the searches are saved in the `anzac-day` parent directory, with each individual search saved in a directory named by the year of the search query. So you end up with one results directory for each year in the span. The separate results files can be easily combined, as shown below.

# ## Set things up

# In[1]:


import os
from pathlib import Path

import pandas as pd

# importing the trove_newspaper_harvester!
from trove_newspaper_harvester.core import Harvester, prepare_query


# In[2]:


get_ipython().run_cell_magic('capture', '', '# Load variables from the .env file if it exists\n# Use %%capture to suppress messages\n%load_ext dotenv\n%dotenv\n')


# In[3]:


# Insert your Trove API key
API_KEY = "YOUR API KEY"

# Use api key value from environment variables if it is available
if os.getenv("TROVE_API_KEY"):
    API_KEY = os.getenv("TROVE_API_KEY")


# ## Run the harvester
#
# First of all we use `prepare_query` to create a base set of parameters. We'll feed it a search for the term "anzac day" and then add in the dates later.

# In[4]:


query = 'https://trove.nla.gov.au/search/category/newspapers?keyword="anzac day"'

query_params = prepare_query(query=query)


# Next we'll loop through our desired span of years, harvesting the results for each Anzac Day. For demonstration purposes I'll use a short span, harvesting results for the years 1916 to 1919. But you could just as easily harvest results from 1916 to the present.
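#
# Before running the full loop, it can help to preview what the combined query will look like for a single year. The sketch below is illustrative only: it assumes `query_params` has been created as above, copies it, and prints the modified `q` value for 1916 without starting a harvest (the name `preview_params` is just for this example).
#
# ``` python
# # Preview the query string for a single year (no harvesting happens here)
# year = 1916
# preview_params = query_params.copy()
# preview_params["q"] = f"{query_params['q']} date:[{year}-04-24T00:00:00Z TO {year}-04-25T00:00:00Z]"
# print(preview_params["q"])
# ```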
# In[ ]:


# Loop through the desired span of years
# Note that the end of the range is not inclusive, so you need to set it to one year beyond the last year you want:
# this loop will cover 1916, 1917, 1918 and 1919, but not 1920.
for year in range(1916, 1920):
    # Copy the base params
    params = query_params.copy()
    # Add the date search to the query string
    params["q"] = f"{query_params['q']} date:[{year}-04-24T00:00:00Z TO {year}-04-25T00:00:00Z]"
    # Initialise the harvester
    # The data_dir parameter sets the parent directory, in this case "anzac-day"
    # The harvest_dir parameter sets the directory, within the parent directory, where the current set of results will be saved;
    # in this case each results directory is named after the year
    harvester = Harvester(
        query_params=params, key=API_KEY, data_dir="anzac-day", harvest_dir=str(year)
    )
    # Harvest the results
    harvester.harvest()
    # Convert the JSON results to CSV
    harvester.save_csv()


# The result of this code will be a series of directories and files like this:
#
# ```
# - anzac-day
#   - 1916
#     - results.csv
#     - ro-crate-metadata.json
#     - harvester_config.json
#     - results.ndjson
#   - 1917
#     - results.csv
#     - ro-crate-metadata.json
#     - harvester_config.json
#     - results.ndjson
#   - 1918
#     - results.csv
#     - ro-crate-metadata.json
#     - harvester_config.json
#     - results.ndjson
#   - 1919
#     - results.csv
#     - ro-crate-metadata.json
#     - harvester_config.json
#     - results.ndjson
# ```

# ## Combine results
#
# After harvesting the data above, the results for each year will be in a separate directory. If you want to join the result sets together, you can do something like this to create a single dataframe.

# In[6]:


# A list to hold all the dataframes
dfs = []

# Loop through the span of years
for year in range(1916, 1920):
    # Convert the results CSV file to a dataframe and add it to the list of dfs
    dfs.append(pd.read_csv(Path("anzac-day", str(year), "results.csv")))

# Combine the dataframes into one
df = pd.concat(dfs)

# View a sample
df.head()


# To make sure we have the combined results, we can look at the number of articles published on each Anzac Day.

# In[7]:


df["date"].value_counts()


# ----
#
# Created by [Tim Sherratt](https://timsherratt.org) ([@wragge](https://twitter.com/wragge)) for the [GLAM Workbench](https://github.com/glam-workbench/).
# Support this project by [becoming a GitHub sponsor](https://github.com/sponsors/wragge?o=esb).