-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtest.py
More file actions
85 lines (63 loc) · 2.59 KB
/
test.py
File metadata and controls
85 lines (63 loc) · 2.59 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
"""
Author: Lee Taylor
test.py : test.py - test the C3D model from: https://arxiv.org/pdf/2206.13318v3.pdf
"""
import random
from model import C3D
from functions import dataloader_test, data_augment
import torch.nn.functional as f
import torch
from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score, f1_score
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if __name__ == '__main__':
    # Init. lightweight C3D model and restore trained weights from checkpoint
    c3d = C3D(num_classes=2)
    # print(f"\nc3d.train_c3d() = {c3d.train_c3d(1)}")
    c3d.load_checkpoint("checkpoints/augmented_normalized_ratiosampling_batchsize64_patch/C3D_at_epoch16.pth")

    preds = []          # per-sample predicted class (rounded softmax prob of output index 0)
    labels_list = []    # per-sample ground-truth label
    # Run the whole test set 10 times, shuffled each pass, accumulating
    # predictions and labels across all passes for the final metrics.
    for _ in range(10):
        inputs_list = list(dataloader_test())
        random.shuffle(inputs_list)
        for inputs, labels in inputs_list:
            # data augmentation (disabled during evaluation)
            # inputs = data_augment(inputs)
            # acquire outputs from passed inputs: prediction, temporal weights
            outputs, vtemp = c3d(inputs)
            outputs_vals = f.softmax(outputs[0], dim=0).detach().numpy()
            outputs_vals_rounded = [round(outputs_vals[0]), round(outputs_vals[1])]
            print(f"outputs = {outputs_vals_rounded}, labels = {labels}")
            # Save predictions and labels for evaluation.
            # NOTE(review): the prediction is the rounded probability at
            # output index 0 — presumably index 0 is the positive class;
            # confirm this matches the dataloader's label encoding.
            preds.append(outputs_vals_rounded[0])
            labels_list.append(int(labels[0]))
    print("Finished Predicting")
    print()
    # Debug output
    print(f"y_test_single_label = {labels_list}")
    print(f"y_pred = {preds}")
    # Create a confusion matrix and unpack the scalar counts
    tn, fp, fn, tp = confusion_matrix(labels_list, preds).ravel()
    # Compute metrics; guard specificity against a test set with no true
    # negatives (tn + fp == 0 would otherwise raise ZeroDivisionError).
    accuracy = accuracy_score(labels_list, preds)
    sensitivity = recall_score(labels_list, preds)  # Sensitivity is the same as Recall
    specificity = tn / (tn + fp) if (tn + fp) else 0.0
    precision = precision_score(labels_list, preds)
    f1 = f1_score(labels_list, preds)
    # Report each metric as a percentage rounded to 2 decimal places
    metric_names = ["Accuracy", "Sensitivity", "Specificity", "Precision", "F1-score"]
    metric_values = [accuracy, sensitivity, specificity, precision, f1]
    for name, value in zip(metric_names, metric_values):
        print(f"{name}: {round(value * 100, 2)}%")