diff --git a/BML_project/active_learning/ss_active_learning.py b/BML_project/active_learning/ss_active_learning.py
index d75b67f..98b9425 100644
--- a/BML_project/active_learning/ss_active_learning.py
+++ b/BML_project/active_learning/ss_active_learning.py
@@ -81,6 +81,8 @@ def stochastic_compare_kmeans_gp_predictions(kmeans_model, gp_model, data_loader
         all_labels.append(labels.cpu().numpy())
         all_data.append((gp_predictions, kmeans_predictions))
         print(f"Processed batch size: {len(current_batch_labels)}, Cumulative original_labels size: {len(original_labels)}, Cumulative gp_predictions size: {len(gp_predictions)}")
+        if len(current_batch_labels) < data_loader.batch_size:
+            print(f"Last batch processed with size: {len(current_batch_labels)}")
     return all_data, np.concatenate(all_labels)
 
 import random
diff --git a/BML_project/ss_main.py b/BML_project/ss_main.py
index 1b9cfce..b784ce4 100644
--- a/BML_project/ss_main.py
+++ b/BML_project/ss_main.py
@@ -10,7 +10,7 @@ from models.ss_gp_model import MultitaskGPModel, train_gp_model
 from utils_gp.ss_evaluation import stochastic_evaluation, evaluate_model_on_all_data
 from active_learning.ss_active_learning import stochastic_uncertainty_sampling, run_minibatch_kmeans, stochastic_compare_kmeans_gp_predictions
-from utils.visualization import plot_comparative_results, plot_training_performance, plot_results
+from utils_gp.visualization import plot_comparative_results, plot_training_performance, plot_results
 
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
@@ -50,6 +50,7 @@ def main():
 
         # Update the training loader with uncertain samples
         train_loader = update_train_loader_with_uncertain_samples(train_loader, uncertain_sample_indices, batch_size)
+        print(f"Updated training data size: {len(train_loader.dataset)}")
 
         # Re-train the model with the updated training data
         model, likelihood, val_metrics = train_gp_model(train_loader, val_loader, num_iterations=10, n_classes=n_classes, patience=10, checkpoint_path='model_checkpoint_last.pt')
@@ -71,7 +72,7 @@ def main():
     results['test_metrics'] = test_metrics
     test_gp_vs_kmeans_data, test_original_labels = stochastic_compare_kmeans_gp_predictions(test_kmeans_model, model, test_loader, n_batches=5, device=device)
 
-    # Before calling confusion_matrix in plot_comparative_results function
+    print(f"Length of test_original_labels: {len(test_original_labels)}, Number of prediction batches: {len(test_gp_vs_kmeans_data)}")
 
     plot_comparative_results(test_gp_vs_kmeans_data, test_original_labels)
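
Note on the batch-size guard added in ss_active_learning.py: the added print was originally gated on an undefined expected_batch_size, so the guard above compares against the loader's own batch_size attribute instead. Below is a minimal, self-contained sketch of that last-batch detection; the toy TensorDataset and the report_batch_sizes helper are assumptions for illustration, not code from this repository.

import torch
from torch.utils.data import DataLoader, TensorDataset

def report_batch_sizes(data_loader: DataLoader) -> None:
    """Print each batch size and flag the (typically smaller) final batch."""
    # Compare against the loader's own batch_size attribute so the guard
    # stays correct when the batch size is changed at the call site.
    for _data, labels in data_loader:
        current_batch_labels = labels.cpu().numpy()
        print(f"Processed batch size: {len(current_batch_labels)}")
        if len(current_batch_labels) < data_loader.batch_size:
            print(f"Last batch processed with size: {len(current_batch_labels)}")

# 10 samples with batch_size=4 yield batches of 4, 4, and 2;
# only the final batch of 2 triggers the second message.
dataset = TensorDataset(torch.randn(10, 3), torch.arange(10))
report_batch_sizes(DataLoader(dataset, batch_size=4))

The same pattern applies to the diagnostic print added before plot_comparative_results in ss_main.py: the original referenced original_labels and gp_predictions, which are local to stochastic_compare_kmeans_gp_predictions and would raise a NameError in main(), so the patch now logs the in-scope test_original_labels and test_gp_vs_kmeans_data instead.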