Problem with TensorFlow in R

Hello,

I'm trying to use TensorFlow through keras3 with the code below, but I get the error shown after it:

library(tensorflow)
library(keras3)
library(dplyr)
library(caret)
set.seed(123)
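# 'test' is my data frame with columns Response and Test (loaded earlier, not shown here)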
test$Test <- as.factor(test$Test)
test$Test <- as.numeric(test$Test) - 1  # Convert to 0 and 1 for binary classification
# Define the features and labels
features <- c("Response")
labels <- "Test"
sample <- sample.int(n = nrow(test), size = floor(.7 * nrow(test)), replace = FALSE)
train_data <- test[sample, ]
test_data <- test[-sample, ]
x_train <- as.matrix(train_data[, features])
y_train <- train_data[, labels]
x_test <- as.matrix(test_data[, features])
y_test <- test_data[, labels]
# Define the number of folds for cross-validation
k <- 5  # Example: 5-fold cross-validation
# Initialize variables to store metrics
accuracy_scores <- numeric(k)
precision_scores <- numeric(k)
recall_scores <- numeric(k)
f1_scores <- numeric(k)
auc_scores <- numeric(k)
# Loop over each fold for cross-validation
for (i in 1:k) {
    cat(paste("Processing fold", i, "\n"))
    # Split data into training and validation sets for this fold
    val_indices <- unlist(createFolds(test$Test, k = k, list = TRUE, returnTrain = FALSE)[i])
    train_indices <- unlist(createFolds(test$Test, k = k, list = TRUE, returnTrain = FALSE)[-i])
    x_train_cv <- as.matrix(test[train_indices, features])
    y_train_cv <- test[train_indices, labels]
    x_val_cv <- as.matrix(test[val_indices, features])
    y_val_cv <- test[val_indices, labels]
    # Build the model
    model <- keras_model_sequential() %>%
        layer_dense(units = 128, activation = 'relu', input_shape = c(length(features))) %>%
        layer_dropout(rate = 0.4) %>%
        layer_dense(units = 128, activation = 'relu') %>%
        layer_dropout(rate = 0.3) %>%
        layer_dense(units = 1, activation = 'sigmoid')
    # Define a custom F1 score metric
    f1_score_metric <- function(y_true, y_pred) {
        # Compute true positives (TP), false positives (FP), and false negatives (FN)
        TP <- sum(y_pred * y_true)
        FP <- sum(y_pred * (1 - y_true))  # y_true = 0 where there are false positives
        FN <- sum((1 - y_pred) * y_true)  # y_pred = 0 where there are false negatives
        # Compute precision and recall
        precision <- TP / (TP + FP)
        recall <- TP / (TP + FN)
        # Compute F1 score
        f1_score <- 2 * precision * recall / (precision + recall)
        return(f1_score)
    }
    # Compile the model
    model %>% compile(
        loss = 'binary_crossentropy',
        optimizer = optimizer_adam(),
        metrics = c("accuracy", "precision", "recall", "AUC", f1_score_metric)
    )
    # Train the model with evaluation on validation data
    history <- model %>% fit(
        x_train_cv, y_train_cv,
        epochs = 30, batch_size = 10,
        validation_data = list(x_val_cv, y_val_cv),
        verbose = 0
    )
    # Evaluate on validation data for this fold
    scores <- model %>% evaluate(x_val_cv, y_val_cv, verbose = 0)
    # Store metrics
    accuracy_scores[i] <- scores$accuracy
    precision_scores[i] <- scores$precision
    recall_scores[i] <- scores$recall
    f1_scores[i] <- 2 * (precision_scores[i] * recall_scores[i]) / (precision_scores[i] + recall_scores[i])
    auc_scores[i] <- scores$AUC
}
# Print average cross-validation metrics
cat('Average cross-validation metrics:\n')
cat('Accuracy:', mean(accuracy_scores), '\n')
cat('Precision:', mean(precision_scores), '\n')
cat('Recall:', mean(recall_scores), '\n')
cat('Test F1 score:', f1_score_metric(y_test, model %>% predict(x_test)), '\n')
cat('AUC:', mean(auc_scores), '\n')
Error in do.call(keras$models$Sequential, Sequental_args) : 
  'what' must be a function or character string

I have already installed keras and keras3.

Any ideas?

George

Can you please edit your example code to make it reproducible, so that I can copy-paste it into a fresh R session and reproduce the error?
It's unclear what the test object is. Ideally you can generate a minimal example dataset with runif() and/or sample(), or dump out a similar object with dput().
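For example, a stand-in along these lines would be enough (a minimal sketch; the column names Response and Test, their types, and the size are assumptions based on your code):

# Hypothetical minimal stand-in for the 'test' object, for illustration only
set.seed(42)
n <- 200
test <- data.frame(
    Response = runif(n),                                # numeric feature
    Test = sample(c("no", "yes"), n, replace = TRUE)    # binary label
)
str(test)

Or, if the real structure matters, dump a small slice of your actual data instead:

# dput(head(test, 20))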