[REFERENCE RMD FILE: http://ohi-science.org/ohiprep_v2021/globalprep/np/v2021/STEP1a_np_ornamentals_prep.html]
This analysis converts FAO commodities data into data layers used to calculate OHI 2022 global natural products scores. We prep the commodities seaweed, fish oil, and ornamental fish so that we can produce USD values for weighting later on; however, the final layer from this data prep (saved to the output folder) consists only of ornamental fish. We also calculate a sustainability layer for ornamental fishing, based on risk and exposure.
New year of FAO data (2019). Replaced deprecated functions (rename_at(), spread(), gather()).
Reference:
http://www.fao.org/fishery/statistics/software/fishstatj/en#downlApp
App release date: July 2021
FAO raw commodities quantity 1976_2019
FAO raw commodities value 1976_2019
FAO metadata
Downloaded: June 23, 2022
Description: Quantity (tonnes) and value (USD) of raw commodities (Exports only) for each country, taxa, and year. The FAO data are subset to include commodities in these categories: ornamental fish, fish oil, and seaweed and plants (see raw/commodities2products_weighting.csv for details).
Time range: 1976-2019
knitr::opts_chunk$set(eval = FALSE)
## load libraries, set directories
library(ohicore) #devtools::install_github('ohi-science/ohicore@dev')
library(dplyr)
library(stringr)
library(tidyr)
library(zoo)
library(ggplot2)
library(here)
library(tidyverse)
library(plotly)
## Load FAO-specific user-defined functions
source(here('workflow/R/fao_fxn.R')) # function for cleaning FAO files
source(here('workflow/R/common.R')) # directory locations
## This file makes it easier to process data for the OHI global assessment
## by creating the following objects:
##
## * dir_M = identifies correct file path to Mazu (internal server) based on your operating system
## * mollCRS = the crs code for the mollweide coordinate reference system we use in the global assessment
## * regions_shape() = function to load global shapefile for land/eez/high seas/antarctica regions
## * ohi_rasters() = function to load two rasters: global eez regions and ocean region
## * region_data() = function to load 2 dataframes describing global regions
## * rgn_syns() = function to load dataframe of region synonyms (used to convert country names to OHI regions)
## * low_pop() = function to load dataframe of regions with low and no human population
## * UNgeorgn() = function to load dataframe of UN geopolitical designations used to gapfill missing data
source(here('globalprep/np/v2022/R/np_fxn.R'))
Simultaneously read and process FAO commodities value and quantity data.
## NOTE: This can be run as a loop, but the "value" and "quant" datasets need to be run individually to make sure
## there are no problems (after this check, they can be looped for efficiency)
## describe where the raw data are located:
dir_fao_data <- file.path(dir_M, 'git-annex/globalprep/_raw_data/FAO_commodities/d2022')

## list files included in the d2022 folder (value and quant datasets)
files <- list.files(dir_fao_data, pattern = glob2rx('*.csv'), full.names = TRUE)
## To compare to old data:
# dir_fao_data <- file.path(dir_M, 'git-annex/globalprep/_raw_data/FAO_commodities/d2020')
# files <- list.files(dir_fao_data, pattern=glob2rx('*.csv'), full.names=T)
## loop
for (f in files){ # f = files[2]

  cat(sprintf('\n\n\n====\nfile: %s\n', basename(f)))

  d <- read.csv(f, check.names = FALSE, strip.white = TRUE, stringsAsFactors = FALSE)
  # check.names = FALSE keeps the original column names, strip.white strips leading and trailing
  # whitespace, and stringsAsFactors = FALSE prevents conversion of characters to factors

  ## Units are tonnes if we are reading the Commodities Quantity csv, and usd for the Commodities Value csv
  units <- c('tonnes', 'usd')[str_detect(f, c('quant', 'value'))] # detect unit name using lowercase American English
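  ## How the line above works: str_detect() recycles the single filename across both patterns,
  ## returning e.g. c(TRUE, FALSE) for a quantity file, so indexing c('tonnes', 'usd') by that
  ## logical vector selects 'tonnes'.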
  ## pivot into long format and clean up FAO-specific data foibles
  ## warning: "attributes are not identical across measure variables; they will be dropped" -- this is fine
  m <- d %>%
    dplyr::select(-`Unit (Name)`) %>%
    rename(country = `Reporting country (Name)`,
           commodity = `Commodity (Name)`,
           trade = `Trade flow (Name)`) %>%
    rename_with(~ gsub("\\[", "", .)) %>%
    rename_with(~ gsub("\\]", "", .)) %>%
    pivot_longer(cols = -c(country, commodity, trade, Unit),
                 names_to = "year", values_to = "value")

  ## Include only the "Exports" data:
  ## 2022 - changed from "Export" to "Exports"
  m <- m %>%
    filter(trade == "Exports")

  m <- m %>%
    fao_clean_data_new() %>% # swaps out FAO-specific codes. NOTE: optional parameter 'sub_N' can be passed to control how an 'N' code is interpreted.
    select(-trade, -Unit) %>% # eliminate the 'trade' and 'Unit' columns
    arrange(country, commodity, is.na(value), year)
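  ## For context (a hedged summary -- see fao_fxn.R for the authoritative list): FAO FishStat uses
  ## symbolic codes in place of some numeric values, e.g. '...' for data unavailable, which
  ## fao_clean_data_new() converts to NA or to small numeric substitutes (the 'sub_N' parameter
  ## controls the substitution for the 'N' code).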
  ## Products join: attach product categories from com2prod, and
  ## filter out all entries that do not match a product category.
  ## Note: np_commodity_lookup() is a user-defined function to compare
  ## commodities in the data vs commodities in the lookup table

  ## load lookup for converting commodities to products
  com2prod <- read.csv(here('globalprep/np/v2022/raw/commodities2products_weighting.csv'), na.strings = '')

  ## version used in 2019:
  ## com2prod <- read.csv(here('globalprep/np/v2019/raw/commodities2products.csv'), na.strings = '')
  ## version used in 2018:
  ## com2prod <- read.csv('raw/commodities2products.csv', na.strings = '')
  ## version used in 2015 (use when testing):
  ## com2prod <- read.csv('../v2014_test/commodities2products.csv', na.strings = '')

  ## Check the current commodity-to-product lookup table. If necessary, make changes to "raw/commodities2products_weighting.csv".
  ## v2021: Missing a couple in each category.
  ## Examine the fish oil category thoroughly: some of the matches caught here are NOT fish oil. For example,
  ## "Whole lobsters Homarus spp, in shell or not, dried, salted/in brine, smoked, cooked by steaming/boiling in water"
  ## is included because "boiling" contains "oil". Add the appropriate missing commodities to commodities2products_weighting.csv.
  ## Don't worry about any that show up under corals, shells, or sponges, since we don't include them in the assessment anyway;
  ## we keep them in for now because we use them as an example in the methods document (see below).
  np_commodity_lookup(m, com2prod)

  ## inner_join will attach product names to matching commodities according to
  ## lookup table 'com2prod', and eliminate all commodities that do not appear in the lookup table.
  m <- m %>%
    inner_join(com2prod, by = 'commodity')

  ## Special case: user-defined function deals with
  ## breaking up Antilles into separate reported rgns
  m <- np_split_antilles(m)
  ## Some changes to region names that aren't working in name_2_rgn()
  m <- m %>%
    mutate(country = ifelse(country == "Côte d'Ivoire", "Ivory Coast", country)) %>%
    mutate(country = ifelse(country == "C\xf4te d'Ivoire ", "Ivory Coast", country)) %>%
    mutate(country = ifelse(country == "C\xf4te d'Ivoire", "Ivory Coast", country)) %>%
    mutate(country = ifelse(country == "Curaçao", "Curacao", country)) %>%
    mutate(country = ifelse(country == "Cura\xe7ao", "Curacao", country)) %>%
    mutate(country = ifelse(country == "Réunion", "Reunion", country)) %>%
    mutate(country = ifelse(country == "R\xe9union", "Reunion", country)) %>%
    filter(country != "Azerbaijan") # landlocked, but not being removed by name_2_rgn?
  m_rgn <- name_2_rgn(df_in = m,
                      fld_name = 'country',
                      flds_unique = c('commodity', 'product', 'year'))

  # v2021 duplicates: "China", "China, Hong Kong SAR", "China, Macao SAR", "Guadeloupe",
  # "Martinique", "Montenegro", "Russian Federation", "Serbia and Montenegro",
  # "Sudan", "Sudan (former)", "Un. Sov. Soc. Rep." -- these are all fixed in the group_by/summarize below
  ## combine composite regions
  ## When summarizing the dataset, this function sums the value column but preserves NA when all
  ## values in a group are NA (rather than converting them to zero); non-NA values are summed normally.
  sum_function <- function(x) {
    if (sum(is.na(x)) == length(x))
      return(NA)
    return(sum(x, na.rm = TRUE))
  }
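  ## e.g., sum_function(c(NA, NA)) returns NA, whereas sum(c(NA, NA), na.rm = TRUE) would return 0;
  ## sum_function(c(1, NA, 2)) returns 3 as usual.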
  m_rgn <- m_rgn %>%
    group_by(rgn_id, rgn_name, commodity, product, year) %>%
    summarize(value = sum_function(value)) %>%
    ungroup()
  ## units: rename value field to units based on filename
  names(m_rgn)[names(m_rgn) == 'value'] <- units

  ## output to .csv -- should create two csvs (tonnes.csv and usd.csv)
  harvest_out <- sprintf(here('globalprep/np/v2022/int/%s.csv'), units)
  write.csv(m_rgn, harvest_out, row.names = FALSE, na = '')
}
Combine the quantity and value data, and do a bit of cleaning to remove data prior to the first reporting year for each commodity and region.
## Read in quant dataset from intermediate folder
h_tonnes <- read.csv(here('globalprep/np/v2022/int/tonnes.csv'))
## Read in value dataset from intermediate folder
h_usd <- read.csv(here('globalprep/np/v2022/int/usd.csv'))
## join the h_tonnes and h_usd data
## h includes rgn_name, rgn_id, commodity, product, year, tonnes, usd.
h <- h_usd %>%
  full_join(h_tonnes, by = c('rgn_name', 'rgn_id', 'commodity', 'product', 'year')) %>%
  mutate(commodity = as.character(commodity)) %>%
  arrange(rgn_id, product, commodity, year)

## clip out years prior to the first reporting year, for each commodity per region
h <- h %>% np_harvest_preclip()
## save a file to use in our methods document
write.csv(h, "int/h_methods_sum.csv", row.names = FALSE)
# test <- h %>%
# group_by(product) %>%
# summarise(sum_tonnes = sum(tonnes, na.rm = TRUE),
# sum_usd = sum(usd, na.rm = TRUE)) ## we will show this table in the methods doc
h <- h %>%
  dplyr::filter(product %in% c("fish_oil", "ornamentals", "seaweeds")) ## filter for our commodities of interest
Summary of gapfilling that is performed:
h <- h %>% np_harvest_gapflag()
## Adds a flag for required gap-filling, based upon NAs in the data.
## NOTE: Does not perform any gap-filling.
## At this point, h includes:
##   rgn_name rgn_id commodity product year tonnes usd gapfill
## 'gapfill' will be one of (zerofill, endfill, tbd, none)

data_check <- h %>% np_datacheck()
## For each commodity within each region, creates (but doesn't save) summary info:
##   num_years: the length of the data series for this commodity in this region
##   usd_unique_nz (or tns_unique_nz): number of unique non-zero values for usd or tonnes
##   usd_na & tns_na: number of NA occurrences
##   paired_obs: number of non-zero paired observations
##   usd_unique_pairs (or tns_unique_pairs): within the set of paired observations, count of unique usd and tonnes values
##   unique_pairs: lesser of usd_unique_pairs and tns_unique_pairs
##   count_no_data: number of paired NAs -- years with no value reported

h <- h %>% np_zerofill()
## For post-reporting years with NA for both tonnes and usd, fill with zero --
## assumes that non-reporting indicates zero harvest to report.
## Also cross-fills zeros where one side is 0 and the other is NA (not flagged as gapfill)
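## A quick illustration of the zerofill logic described above (hypothetical values):
##   tonnes = NA, usd = NA  ->  tonnes = 0, usd = 0   (flagged as zerofill)
##   tonnes = 0,  usd = NA  ->  usd = 0               (cross-fill, not flagged as gapfill)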
h <- h %>% np_lowdata_filter()
## Excludes commodities (within a region) that have few non-zero data points.
## Optional parameter with default: nonzero_h_yr_min = 4
## NOTE: This filter has consequences for the regression, but also has meaning in terms of
## not inflicting a penalty on regions trying, and then stopping, an experimental harvest.

## Melanie's script to add a georegional ID tag based on country keys and IDs.
h <- h %>%
  add_georegion_id()

h <- h %>% np_regr_fill(years_back = 10, vars = 'td', scope = 'rgn_id')
h <- h %>% np_regr_fill(vars = 'tdy', scope = 'georgn_id')
h <- h %>% np_regr_fill(vars = 'tdy', scope = 'global')
## np_regr_fill() is a generalized regression gapfill function. Parameters (with defaults):
## * years_back = 50 (int): how far back in the time series to include within the regression.
## * min_paired_obs = 4 (int): how many paired observations are required to attempt a regression.
## * scope = 'rgn_id' (str): ('rgn_id', 'georgn_id', 'global') determines the grouping scale for the regression.
## * vars = 'tdy' (str): ('td', 'tdy') determines the model: (tonnes ~ usd) or (tonnes ~ usd + year) [and vice versa]
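## A minimal sketch of the regression-fill idea (an assumption about np_fxn.R's internals, which
## also handle grouping, minimum paired observations, and gapfill flags):
# fit <- lm(tonnes ~ usd, data = grp)            # the 'td' model; 'tdy' adds '+ year'
# missing <- is.na(grp$tonnes) & !is.na(grp$usd)
# grp$tonnes[missing] <- predict(fit, newdata = grp[missing, ])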
h <- h %>% np_end_fill()
## For the final year of data, if both usd and tonnes were originally reported as NA, pull forward
## values for usd and tonnes from the previous year. This should happen after the regression fill.

h_comm <- h
## Store commodity-level data before moving on to the product-level smoothing.

## Output gapfilling report to .csv files.
## Very little usd gapfilling, and none in recent years (the data used to weight contributions), so we ignore it: gapfill == "r2_u_gr"
h_gap <- h %>%
  mutate(gapfill = ifelse(gapfill == "r2_u_gr", "none", gapfill)) %>% # focus only on tonnes gapfilling
  select(rgn_id, commodity, product, year, gapfill)

h_gap_ornamentals <- h_gap %>%
  filter(product == "ornamentals")

write.csv(h_gap_ornamentals, 'output/np_ornamentals_harvest_tonnes_gf.csv', row.names = FALSE, na = '')
Summarize each product per country per year (e.g., all corals in Albania in 2011), and do some error checking.
h_prod <- h_comm %>%
  group_by(rgn_name, rgn_id, product, year) %>%
  summarize(tonnes = sum(tonnes, na.rm = TRUE),
            usd = sum(usd, na.rm = TRUE))
## Error-checking and table exports to see if there are duplicates
stopifnot(sum(duplicated(h_prod[ , c('rgn_id', 'product', 'year')])) == 0)
Look at the data in wide format, with all commodities plus a product subtotal (where the commodity column value is "Z_TOTAL"), compared with the input data prior to summing.
h_x_tonnes <- h_comm %>%
  bind_rows(mutate(h_prod, commodity = 'Z_TOTAL')) %>%
  select(rgn_name, rgn_id, commodity, product, year, tonnes) %>%
  arrange(rgn_name, product, commodity, year) %>%
  pivot_wider(names_from = year, values_from = tonnes)

h_x_usd <- h_comm %>%
  bind_rows(mutate(h_prod, commodity = 'Z_TOTAL')) %>%
  select(rgn_name, rgn_id, commodity, product, year, usd) %>%
  arrange(rgn_name, product, commodity, year) %>%
  pivot_wider(names_from = year, values_from = usd)
## Check a random country and commodity
australia <- h_x_usd %>% filter(product == "ornamentals", rgn_name == "Australia")
australia
## Can open up in Excel to compare subtotals per country-product-year
write.csv(h_x_tonnes, 'int/np_harvest_tonnes_wide.csv', row.names = FALSE, na = 'NA')
write.csv(h_x_usd, 'int/np_harvest_usd_wide.csv', row.names = FALSE, na = 'NA')
Calculate rolling averages of tonnes and USD in order to determine peak values. This is based upon total harvest by product group, not individual commodity.
# Find the max year in the summarized data table
year_max <- max(h_prod$year)

roll_prod <- h_prod %>%
  arrange(rgn_id, product, year) %>%
  group_by(rgn_id, product) %>%
  mutate(
    tonnes_rollmean = rollapply(tonnes, width = 4, FUN = mean, align = 'right', partial = TRUE, na.rm = FALSE),
    usd_rollmean    = rollapply(usd, width = 4, FUN = mean, align = 'right', partial = TRUE, na.rm = FALSE)) %>%
  rename(
    tonnes_orig = tonnes, # prevent overwriting of reported and gapfilled values
    usd_orig = usd) %>%   # prevent overwriting of reported and gapfilled values
  mutate(
    tonnes = ifelse(!is.na(tonnes_rollmean), tonnes_rollmean, tonnes_orig),
    usd = ifelse(!is.na(usd_rollmean), usd_rollmean, usd_orig)) %>%
  select(rgn_id, rgn_name, product, year, tonnes, usd, tonnes_orig, usd_orig)
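For intuition, a toy example of the right-aligned, partial rolling mean used above (hypothetical input):

rollapply(c(2, 4, 6, 8, 10), width = 4, FUN = mean, align = 'right', partial = TRUE)
## returns 2 3 4 5 7 -- each value is the mean of up to the 4 most recent years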
Score harvest (tonnes and usd) relative to peaks, and output the values as .csvs.
## Find peak harvest (tonnes) per region-product across the full time series:
peak_prod <- roll_prod %>%
  group_by(rgn_id, product) %>%
  mutate(tonnes_peak = max(tonnes, na.rm = TRUE)) %>%
  ungroup()

## Determine relative status:
smooth_prod <- peak_prod %>%
  mutate(tonnes_rel = ifelse(tonnes >= tonnes_peak, 1, tonnes / tonnes_peak))
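## e.g., with tonnes = 30 and tonnes_peak = 120, tonnes_rel = 30 / 120 = 0.25;
## a region-product at its peak harvest scores 1.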
## Write entire data frame to .csv:
write.csv(smooth_prod, 'int/np_harvest_smoothed_data.csv', row.names = FALSE, na = '')
## Save tonnes/usd data for weighting purposes
tonnes <- smooth_prod %>%
  select(rgn_id, product, year, tonnes, usd)
write.csv(tonnes, 'int/np_harvest_tonnes_usd.csv', row.names = FALSE, na = '')

## Save relative tonnes data for the ornamentals layer
tonnes_ornamentals_rel <- smooth_prod %>%
  dplyr::filter(product == "ornamentals") %>%
  dplyr::select(rgn_id, product, year, tonnes_rel)

write.csv(tonnes_ornamentals_rel, 'output/np_ornamentals_harvest_tonnes_rel.csv', row.names = FALSE, na = '')
### Calculates NP exposure based on habitats (ornamentals).
### Returns the harvest data frame with a new column for exposure:
### [rgn_id rgn_name product year tonnes tonnes_rel prod_weight exposure]
#########################################

np_harvest_orn <- read_csv("int/np_harvest_tonnes_usd.csv") %>%
  dplyr::select(year, rgn_id, product, tonnes) %>%
  filter(product == "ornamentals")
### Determine habitat areas for exposure
hab_rocky <- read_csv(file.path(here(), "globalprep/hab_rockyreef/v2012/data/habitat_extent_rocky_reef_updated.csv")) %>%
  dplyr::select(rgn_id, km2) %>%
  dplyr::filter(km2 > 0)

hab_coral <- read_csv(file.path(here(), "globalprep/hab_coral/v2021/data/habitat_extent_coral_updated.csv")) %>%
  dplyr::select(rgn_id, km2) %>%
  dplyr::filter(km2 > 0)
### area for products in both coral and rocky reef habitats: shells, ornamentals, sponges
area_dual_hab <- np_harvest_orn %>%
  dplyr::left_join(hab_coral %>%
                     dplyr::rename(coral_km2 = km2),
                   by = c('rgn_id')) %>%
  dplyr::left_join(hab_rocky %>%
                     dplyr::rename(rocky_km2 = km2),
                   by = c('rgn_id')) %>%
  dplyr::rowwise() %>%
  dplyr::mutate(km2 = sum(c(rocky_km2, coral_km2), na.rm = TRUE)) %>%
  dplyr::filter(km2 > 0) %>%
  dplyr::select(-coral_km2, -rocky_km2)
### Determine exposure
### exposure: combine areas, get tonnes / area, and rescale with a log transform
np_exp_orn <- area_dual_hab %>%
  dplyr::mutate(expos_raw = ifelse(tonnes > 0 & km2 > 0, (tonnes / km2), 0)) %>%
  dplyr::group_by(product) %>%
  dplyr::mutate(expos_prod_max = max(expos_raw, na.rm = TRUE)) %>%
  dplyr::ungroup() %>%
  dplyr::mutate(exposure = (log(expos_raw + 1) / log(expos_prod_max + 1)),
                exposure = ifelse(exposure > 1, 1, exposure)) %>%
  dplyr::select(-km2, -expos_raw, -expos_prod_max) ### clean up columns
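## Worked example of the rescaling above (hypothetical values): if expos_raw = 0.5 and the
## product-wide max is 4, exposure = log(1.5) / log(5) ≈ 0.25; a region at the max rescales to 1.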
gap_fill <- np_exp_orn %>%
  dplyr::mutate(gapfilled = ifelse(is.na(exposure), 1, 0)) %>%
  dplyr::mutate(method = ifelse(is.na(exposure), "prod_average", NA)) %>%
  dplyr::select(rgn_id, product, year, gapfilled, method)

write.csv(gap_fill, "output/np_exposure_ornamentals_gf.csv", row.names = FALSE)
### add exposure for countries with habitat extent == NA
np_exp_orn <- np_exp_orn %>%
  dplyr::group_by(product) %>%
  dplyr::mutate(mean_exp = mean(exposure, na.rm = TRUE)) %>%
  dplyr::mutate(exposure = ifelse(is.na(exposure), mean_exp, exposure)) %>%
  dplyr::select(-mean_exp) %>%
  dplyr::ungroup() %>%
  dplyr::mutate(product = as.character(product)) %>%
  dplyr::select(rgn_id, year, product, exposure)

## save exposure as a layer
write.csv(np_exp_orn, "output/np_exposure_ornamentals.csv", row.names = FALSE)
### Calculates NP risk based on:
###   ornamentals: risk = 1 if blast or cyanide fishing is present
### Returns a data frame of risk, by product, by region:
### [rgn_id year product risk]
#########################################
### Determine risk
r_cyanide <- read_csv(file.path(here(), "globalprep/np_prs_poison_blast_fishing/v2013/data/gl_thr_poison_3nm_rgn2013.csv")) %>%
  # AlignDataYears(layer_nm = "np_cyanide", layers_obj = layers) %>%
  dplyr::filter(!is.na(score) & score > 0) %>%
  dplyr::select(rgn_id,
                # year = scenario_year,
                cyanide = score)

r_blast <- read_csv(file.path(here(), "globalprep/np_prs_poison_blast_fishing/v2013/data/gl_thr_blast_3nm_rgn2013.csv")) %>%
  # AlignDataYears(layer_nm = "np_blast", layers_obj = layers) %>%
  filter(!is.na(score) & score > 0) %>%
  select(rgn_id,
         # year = scenario_year,
         blast = score)
### risk for ornamentals is set to 1 if blast or cyanide fishing is present, based on the Nature 2012 code,
### despite the Nature 2012 supplement saying risk for ornamental fish is set to the "relative intensity of cyanide fishing"
risk_orn <- r_cyanide %>%
  full_join(r_blast, by = c("rgn_id")) %>%
  mutate(ornamentals = 1) %>%
  select(rgn_id, ornamentals)
### risk as binary
np_risk_orn <- expand.grid(rgn_id = unique(np_harvest_orn$rgn_id),
                           year = unique(np_harvest_orn$year)) %>%
  ### ornamentals
  left_join(risk_orn, by = c('rgn_id')) %>%
  mutate(ornamentals = ifelse(is.na(ornamentals), 0, ornamentals)) %>%
  pivot_longer(cols = -c(rgn_id, year), names_to = "product", values_to = "risk") %>%
  mutate(product = as.character(product))
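## e.g., expand.grid(rgn_id = c(1, 2), year = c(2018, 2019)) yields 4 rows -- every region-year
## combination -- so regions with no recorded blast/cyanide fishing still appear, with risk = 0.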
np_risk_gf <- np_risk_orn %>%
  mutate(gapfilled = 0, method = NA) %>%
  dplyr::select(-risk)

write.csv(np_risk_gf, "output/np_risk_ornamentals_gf.csv", row.names = FALSE)
## write this as a risk layer
write.csv(np_risk_orn, "output/np_risk_ornamentals.csv", row.names = FALSE)