
Resolving minimal errors
Luis Roberto Mercado Diaz committed Apr 2, 2024
1 parent 4f589f1 commit 55d2e27
Showing 2 changed files with 5 additions and 2 deletions.
2 changes: 2 additions & 0 deletions BML_project/active_learning/ss_active_learning.py
@@ -81,6 +81,8 @@ def stochastic_compare_kmeans_gp_predictions(kmeans_model, gp_model, data_loader
all_labels.append(labels.cpu().numpy())
all_data.append((gp_predictions, kmeans_predictions))
print(f"Processed batch size: {len(current_batch_labels)}, Cumulative original_labels size: {len(original_labels)}, Cumulative gp_predictions size: {len(gp_predictions)}")
if len(current_batch_labels) < expected_batch_size:
print(f"Last batch processed with size: {len(current_batch_labels)}")
return all_data, np.concatenate(all_labels)

import random
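Editor's note: the two lines added in this hunk follow the common "detect the short final batch" pattern when iterating a PyTorch DataLoader. The sketch below is illustrative only; the names `expected_batch_size` and `current_batch_labels` mirror the diff, but the toy dataset and loop are assumptions, not the project's actual `stochastic_compare_kmeans_gp_predictions` implementation.

```python
# Illustrative sketch only: shows the "flag the smaller final batch" pattern
# used by the two added lines. The dataset and loop are stand-ins for the
# project's real function.
import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset

def iterate_with_last_batch_check(data_loader, expected_batch_size):
    all_labels = []
    for _, labels in data_loader:
        current_batch_labels = labels
        all_labels.append(labels.cpu().numpy())
        print(f"Processed batch size: {len(current_batch_labels)}")
        if len(current_batch_labels) < expected_batch_size:
            # Only the final batch of an epoch can be smaller than the
            # configured batch size (when drop_last=False).
            print(f"Last batch processed with size: {len(current_batch_labels)}")
    return np.concatenate(all_labels)

if __name__ == "__main__":
    data = torch.randn(25, 3)            # 25 samples -> final batch holds 5 items
    labels = torch.randint(0, 2, (25,))
    loader = DataLoader(TensorDataset(data, labels), batch_size=10)
    iterate_with_last_batch_check(loader, expected_batch_size=10)
```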
5 changes: 3 additions & 2 deletions BML_project/ss_main.py
@@ -10,7 +10,7 @@
from models.ss_gp_model import MultitaskGPModel, train_gp_model
from utils_gp.ss_evaluation import stochastic_evaluation, evaluate_model_on_all_data
from active_learning.ss_active_learning import stochastic_uncertainty_sampling, run_minibatch_kmeans, stochastic_compare_kmeans_gp_predictions
from utils.visualization import plot_comparative_results, plot_training_performance, plot_results
from utils_gp.visualization import plot_comparative_results, plot_training_performance, plot_results

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

@@ -50,6 +50,7 @@ def main():

# Update the training loader with uncertain samples
train_loader = update_train_loader_with_uncertain_samples(train_loader, uncertain_sample_indices, batch_size)
print(f"Updated training data size: {len(train_loader.dataset)}")

# Re-train the model with the updated training data
model, likelihood, val_metrics = train_gp_model(train_loader, val_loader, num_iterations=10, n_classes=n_classes, patience=10, checkpoint_path='model_checkpoint_last.pt')
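Editor's note: the added print here is a sanity check that `update_train_loader_with_uncertain_samples` actually changed the size of the training set before the GP model is retrained. The project's implementation of that helper is not shown in this diff; the sketch below is only one plausible shape for it, assuming the uncertain indices point into an unlabeled pool whose items get appended to the current training data. The real call site passes just the loader, indices, and batch size, so the explicit `pool_dataset` argument here is an assumption made to keep the sketch self-contained.

```python
# Hypothetical sketch (not the project's helper): rebuild the training
# DataLoader after appending samples flagged as uncertain by active learning.
import torch
from torch.utils.data import ConcatDataset, DataLoader, Subset, TensorDataset

def update_train_loader_with_uncertain_samples(train_loader, pool_dataset,
                                               uncertain_sample_indices, batch_size):
    # Take the items the acquisition step flagged as uncertain...
    uncertain_subset = Subset(pool_dataset, list(uncertain_sample_indices))
    # ...and append them to whatever the loader was already training on.
    updated_dataset = ConcatDataset([train_loader.dataset, uncertain_subset])
    return DataLoader(updated_dataset, batch_size=batch_size, shuffle=True)

if __name__ == "__main__":
    train_ds = TensorDataset(torch.randn(100, 3), torch.randint(0, 2, (100,)))
    pool_ds = TensorDataset(torch.randn(50, 3), torch.randint(0, 2, (50,)))
    loader = DataLoader(train_ds, batch_size=16, shuffle=True)
    loader = update_train_loader_with_uncertain_samples(loader, pool_ds, [1, 7, 42], 16)
    print(f"Updated training data size: {len(loader.dataset)}")  # 103, mirroring the added print
```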
@@ -71,7 +72,7 @@ def main():

results['test_metrics'] = test_metrics
test_gp_vs_kmeans_data, test_original_labels = stochastic_compare_kmeans_gp_predictions(test_kmeans_model, model, test_loader, n_batches=5, device=device)
# Before calling confusion_matrix in plot_comparative_results function

print(f"Length of original_labels: {len(original_labels)}, Length of gp_predictions: {len(gp_predictions)}")
plot_comparative_results(test_gp_vs_kmeans_data, test_original_labels)

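Editor's note: the final hunk logs the lengths of the label and prediction arrays right before `plot_comparative_results`, whose comparison presumably ends in a confusion matrix; mismatched lengths are the usual cause of errors at that point. Below is a minimal illustration of that guard, using scikit-learn's `confusion_matrix` as a stand-in for whatever the plotting helper does internally.

```python
# Illustrative guard only: verify labels and predictions line up before
# building a confusion matrix. sklearn is a stand-in for the internals of
# the project's plot_comparative_results.
import numpy as np
from sklearn.metrics import confusion_matrix

original_labels = np.array([0, 1, 1, 0, 2, 2])
gp_predictions = np.array([0, 1, 0, 0, 2, 1])

print(f"Length of original_labels: {len(original_labels)}, "
      f"Length of gp_predictions: {len(gp_predictions)}")
if len(original_labels) != len(gp_predictions):
    raise ValueError("Labels and predictions must have the same length")

print(confusion_matrix(original_labels, gp_predictions))
```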
