Title: Need help resolving "could not find function" error in R code
Question:
I've been working on a script in R for extracting data and encountered an error that I'm having trouble resolving. Here's the code snippet where the error occurs:
...
df <- readRDS(file.path(data_dir, SURVEY_NAME, "FinalData", "Individual Datasets",
"survey_socioeconomic.Rds"))
coordinates(df) <- ~longitude.x+latitude.x
crs(df) <- CRS("+init=epsg:4326")
df <- geo.buffer_chunks(df, r = buffer_i, chunk_size = 100)
...
The error message I'm getting is: "Error in geo.buffer_chunks(df, r = buffer_i, chunk_size = 100) : could not find function "geo.buffer_chunks"."
I've checked my loaded packages, but it seems that geo.buffer_chunks
isn't recognized. I've also looked through the code, but couldn't find where this function is defined.
Can anyone help me understand why this function isn't recognized and how I can resolve this error? A working alternative to this function would also be appreciated. Thanks in advance!
# Extract Black Marble Data
# Setup: restore the renv lockfile, then load all required packages.
# NOTE: rm(list = ls()) only clears the global environment; a fresh R session
# is the more reliable way to guarantee a clean state.
rm(list = ls())
renv::restore()
# pacman::p_load() installs any missing packages before loading them.
# Duplicate entries from the original list (rgeos, haven, geosphere, ggmap,
# caret) have been removed — each package needs to be listed only once.
pacman::p_load(tidyverse,
               rgdal,
               viridis,
               readstata13,
               dplyr,
               data.table,
               raster,
               stargazer,
               stringdist,
               tmaptools,
               stringr,
               geosphere,
               rgeos,
               haven,
               ggmap,
               sf,
               sp,
               glmnet,
               caret,
               mltest,
               RANN,
               lubridate,
               jsonlite,
               httr,
               curl,
               ggpmisc,
               sjmisc,
               dbscan,
               ggplot2,
               spatialEco,
               radiant.data,
               readxl,
               mclust,
               missMDA,
               DescTools,
               furrr,
               countrycode,
               FactoMineR,
               progressr,
               ggridges,
               ggpubr,
               xgboost,
               WDI,
               scales,
               ggExtra,
               ggrepel,
               ggcorrplot,
               rnaturalearth,
               ggthemes,
               gghalves,
               ggtext,
               ggsignif,
               LiblineaR,
               exactextractr)
# File paths -------------------------------------------------------------------
# Use <- for assignment (original used =, which is discouraged at top level).
github_dir <- "E:/Big Data Poverty Estimation/"

# functions.R must define project helpers used below (e.g. geo.buffer_chunks);
# if this file is missing or outdated, later calls will fail with
# "could not find function".
source(file.path(github_dir, "Functions", "functions.R"))

data_dir       <- "E:/Big Data Poverty Estimation/Data"
ntl_harmon_dir <- file.path(data_dir, "DMSPOLS_VIIRS_Harmonized")

# Survey to process. Options:
# -- DHS
# -- DHS_nga_policy_experiment
# -- LSMS
SURVEY_NAME <- "DHS"
# Delete existing files --------------------------------------------------------
# Manual toggle: change FALSE to TRUE to remove previously extracted outputs
# and force a full re-extraction. Use TRUE/FALSE, never T/F — T and F are
# ordinary variables that can be reassigned.
if (FALSE) {
  to_rm <- file.path(data_dir, SURVEY_NAME, "FinalData", "Individual Datasets") %>%
    list.files(full.names = TRUE) %>%
    str_subset("ntl_harmonized181920_")

  for (to_rm_i in to_rm) file.remove(to_rm_i)
}
# Prep NTL Data ----------------------------------------------------------------
# Load the 2018-2020 harmonized DMSP/VIIRS nighttime-lights rasters and
# average them cell-wise into a single layer.
ntl_paths <- file.path(ntl_harmon_dir, "RawData",
                       paste0("Harmonized_DN_NTL_", 2018:2020, "_simVIIRS.tif"))
r_stack <- stack(lapply(ntl_paths, raster))
r <- calc(r_stack, fun = mean, na.rm = TRUE)
# Extract Data -----------------------------------------------------------------
# geo.buffer_chunks() is a project helper expected from Functions/functions.R.
# The reported "could not find function" error means functions.R does not
# define it, so provide a guarded fallback built on spatialEco::geo.buffer()
# (spatialEco is loaded above). It buffers point data by r meters, processing
# rows in chunks of chunk_size to limit memory use.
# NOTE(review): older spatialEco versions return sp objects (matching the
# df@data access below); newer versions return sf — confirm the installed
# version if downstream code errors.
if (!exists("geo.buffer_chunks")) {
  geo.buffer_chunks <- function(x, r, chunk_size = 100) {
    row_ids  <- seq_len(nrow(x))
    chunk_of <- ceiling(row_ids / chunk_size)
    out_list <- lapply(split(row_ids, chunk_of), function(i) {
      spatialEco::geo.buffer(x[i, ], r = r)
    })
    do.call(rbind, out_list)
  }
}

# (Removed the dead `buffer_i <- 1120` assignment: the for loop immediately
# overwrites it; it was a leftover from interactive development.)
for (buffer_i in c(1120, 3360)) {  # buffer radii in meters
  print(buffer_i)

  OUT_PATH <- file.path(data_dir, SURVEY_NAME, "FinalData", "Individual Datasets",
                        paste0("ntl_harmonized181920_", buffer_i, ".Rds"))

  # Skip radii that have already been extracted.
  if (!file.exists(OUT_PATH)) {

    #### Prep Survey Data
    df <- readRDS(file.path(data_dir, SURVEY_NAME, "FinalData", "Individual Datasets",
                            "survey_socioeconomic.Rds"))

    # Promote the data frame to SpatialPointsDataFrame and declare WGS84
    # lon/lat. ("+init=epsg:" proj4 strings are deprecated in PROJ >= 6 but
    # still accepted.)
    coordinates(df) <- ~longitude.x + latitude.x
    crs(df) <- CRS("+init=epsg:4326")

    # Buffer each survey cluster point by buffer_i meters.
    df <- geo.buffer_chunks(df, r = buffer_i, chunk_size = 100)

    #### Extract values: mean nighttime lights within each buffer polygon
    df$ntlharmon_avg <- exact_extract(r, df, 'mean')

    df_out <- df@data %>%
      dplyr::select(uid, year, ntlharmon_avg)

    saveRDS(df_out, OUT_PATH)
  }
}