
Commit

Fix the oversight, wherein the feature extractor had not been initialized whilst testing the autoencoders
wannabeOG committed Nov 25, 2019

1 parent 97579a5 commit 031b87c
Showing 1 changed file with 25 additions and 15 deletions.
40 changes: 25 additions & 15 deletions test_models.py
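In short, the fix initializes the AlexNet convolutional backbone once, before the per-task loop, and has the autoencoder reconstruct the backbone's flattened output instead of the raw image batch. Below is a minimal sketch of the corrected flow, not part of the commit: the repository's Alexnet_FE wrapper, autoencoder class, and encoder_criterion are replaced here with plain torchvision/nn stand-ins.

import torch
import torch.nn as nn
from torchvision import models

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Stand-in for the repository's Alexnet_FE: the pretrained AlexNet conv backbone,
# created once rather than per task.
feature_extractor = models.alexnet(pretrained=True).features.to(device).eval()

# Stand-in autoencoder and reconstruction criterion (the repository defines its own).
autoencoder = nn.Sequential(
    nn.Linear(256 * 6 * 6, 100), nn.ReLU(), nn.Linear(100, 256 * 6 * 6)
).to(device)
encoder_criterion = nn.MSELoss()

def reconstruction_loss(input_data):
    # The corrected flow: extract conv features, flatten them, and reconstruct
    # those features with the autoencoder (previously the raw images were fed in).
    with torch.no_grad():
        input_data = input_data.to(device)
        input_to_ae = feature_extractor(input_data)
        input_to_ae = input_to_ae.view(input_to_ae.size(0), -1)
        preds = autoencoder(input_to_ae)
        return encoder_criterion(preds, input_to_ae).item()

Creating the extractor before the task loop also means the pretrained AlexNet weights are loaded once instead of once per task.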
@@ -22,6 +22,7 @@
import argparse
import numpy as np
from random import shuffle
+import os

import copy
from autoencoder import *
@@ -38,13 +39,14 @@
parser = argparse.ArgumentParser(description='Test file')
#parser.add_argument('--task_number', default=1, type=int, help='Select the task you want to test out the architecture; choose from 1-4')
parser.add_argument('--use_gpu', default=False, type=bool, help = 'Set the flag if you wish to use the GPU')

parser.add_argument('--batch_size', default=16, type=int, help='Batch size you want to use whilst testing the model')
args = parser.parse_args()
use_gpu = args.use_gpu


#randomly shuffle the tasks in the sequence
task_number_list = [x for x in range(1, 10)]
-shuffle(task_number)
+shuffle(task_number_list)


#transformations for the test data
@@ -65,12 +67,10 @@
])
}


#create the results.txt file
with open("results.txt", "w") as myfile:
myfile.write()
myfile.close()

+#set the device to be used and initialize the feature extractor to feed the data into the autoencoder
+device = torch.device("cuda:0" if use_gpu else "cpu")
+feature_extractor = Alexnet_FE(models.alexnet(pretrained=True))
+feature_extractor.to(device)

for task_number in task_number_list:

@@ -91,14 +91,14 @@
image_folder = datasets.ImageFolder(os.path.join(path_task, 'test'), transform = data_transforms_mnist['test'])
dset_size = len(image_folder)

-device = torch.device("cuda:0" if use_gpu else "cpu")


dset_loaders = torch.utils.data.DataLoader(image_folder, batch_size = batch_size,
shuffle=True, num_workers=4)

best_loss = 99999999999
model_number = 0


#Load autoencoder models for tasks 1-4; need to select the best performing autoencoder model
for ae_number in range(1, 10):
ae_path = os.path.join(encoder_path, "autoencoder_" + str(ae_number))
@@ -122,12 +122,19 @@
else:
input_data = Variable(input_data)

-preds = model(input_data)
-loss = encoder_criterion(preds, input_data)

+#get the input to the autoencoder from the conv backbone of the Alexnet
+input_to_ae = feature_extractor(input_data)
+input_to_ae = input_to_ae.view(input_to_ae.size(0), -1)

+#get the outputs from the model
+preds = model(input_to_ae)
+loss = encoder_criterion(preds, input_to_ae)

del preds
del input_data

+del input_to_ae

running_loss = running_loss + loss.item()

model_loss = running_loss/dset_size
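The hunk above ends with the per-autoencoder reconstruction loss; the lines that keep track of the running best fall outside the hunks shown here. A hypothetical helper illustrating that selection step (the names below are not from the repository):

def pick_best_autoencoder(loss_per_autoencoder):
    # Return the autoencoder number with the lowest average reconstruction loss,
    # i.e. the task the current test data is routed to.
    best_number = min(loss_per_autoencoder, key=loss_per_autoencoder.get)
    return best_number, loss_per_autoencoder[best_number]

# Example: pick_best_autoencoder({1: 0.42, 2: 0.17, 3: 0.35}) returns (2, 0.17)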
@@ -146,15 +153,17 @@
print ("Incorrect routing, wrong model has been selected")


-trained_model_path = os.path.join(model_path, "model_" + model_number)
+#Load the expert that has been found by this procedure into memory
+trained_model_path = os.path.join(model_path, "model_" + str(model_number))

+#Get the number of classes that this expert was exposed to
file_name = os.path.join(trained_model_path, "classes.txt")
file_object = open(file_name, 'r')

num_of_classes = file_object.read()
file_object.close()

-num_of_classes = int(num_of_classes_old)
+num_of_classes = int(num_of_classes)

model = GeneralModelClass(num_of_classes)
model.load_state_dict(torch.load(os.path.join(trained_model_path, 'best_performing_model.pth')))
@@ -193,6 +202,7 @@
model_loss = running_loss/dset_size
model_accuracy = running_corrects.double()/dset_size

+#Store the results into a file
with open("results.txt", "a") as myfile:
myfile.write("\n{}: {}".format(task_number, model_accuracy*100))
myfile.close()
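Taken together, the tail of the file restores the expert that routing selected and logs its test accuracy. A condensed sketch of that sequence, assuming the repository's GeneralModelClass and its directory layout (model_<n>/classes.txt and model_<n>/best_performing_model.pth) as they appear in the diff; the evaluation loop itself is abbreviated:

import os
import torch

def evaluate_selected_expert(model_path, model_number, task_number,
                             dset_loaders, dset_size, device):
    trained_model_path = os.path.join(model_path, "model_" + str(model_number))

    # classes.txt records how many classes this expert was trained on
    with open(os.path.join(trained_model_path, "classes.txt")) as f:
        num_of_classes = int(f.read())

    model = GeneralModelClass(num_of_classes)  # defined elsewhere in the repository
    model.load_state_dict(torch.load(os.path.join(trained_model_path,
                                                  "best_performing_model.pth")))
    model.to(device)
    model.eval()

    running_corrects = 0
    with torch.no_grad():
        for inputs, labels in dset_loaders:
            inputs, labels = inputs.to(device), labels.to(device)
            _, preds = torch.max(model(inputs), 1)
            running_corrects += torch.sum(preds == labels).item()

    model_accuracy = running_corrects / dset_size

    # Append one line per task, matching the "{task_number}: {accuracy}" format above
    with open("results.txt", "a") as myfile:
        myfile.write("\n{}: {}".format(task_number, model_accuracy * 100))

    return model_accuracy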
