# Bucket the numeric value `idx` into a class label `cl` in 0-6, using
# 10-wide bins over the interval (60, 130].
# Fixes over the original:
#   * `float(float(idx))` collapsed to a single conversion, done once
#     instead of on every comparison;
#   * the run of independent `if` statements replaced with an exclusive
#     `elif` chain (bins are mutually exclusive anyway).
# NOTE(review): as in the original, a value <= 60 or > 130 leaves `cl`
# unassigned (later use would raise NameError) -- confirm the caller
# guarantees idx falls inside (60, 130].
_v = float(idx)
if 60 < _v <= 70:
    cl = 0
elif 70 < _v <= 80:
    cl = 1
elif 80 < _v <= 90:
    cl = 2
elif 90 < _v <= 100:
    cl = 3
elif 100 < _v <= 110:
    cl = 4
elif 110 < _v <= 120:
    cl = 5
elif 120 < _v <= 130:
    cl = 6

# Skeleton 1:
# Skeleton 1: two conv blocks (Conv -> BatchNorm -> ReLU -> MaxPool)
# followed by two fully-connected layers; trained with CE (cross-entropy)
# loss, so forward() returns raw logits (no softmax).
class ConvNet(nn.Module):
    """Small CNN for classifying single-channel images into `num_classes` classes.

    Both conv blocks use kernel_size=2, stride=1, padding=1 followed by a
    stride-1 max-pool with kernel_size=2, so each block preserves the
    spatial size of its input (H -> H+1 -> H).  fc1's hard-coded input
    size (64000 = 32 channels * H * W) therefore assumes the input
    satisfies H * W == 2000, e.g. 40x50 images -- TODO confirm against
    the data loader.
    """

    def __init__(self, num_classes=7):
        super(ConvNet, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=2, stride=1, padding=1),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=1))
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=2, stride=1, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=1))
        self.fc1 = nn.Linear(64000, 32)
        # BUG FIX: the original hard-coded 7 here, silently ignoring the
        # num_classes parameter.  Default (7) is unchanged, so existing
        # callers see identical behavior.
        self.fc2 = nn.Linear(32, num_classes)
        # self.sm = nn.Softmax()  # unused: nn.CrossEntropyLoss applies
        # log-softmax internally, so the model must emit raw logits

    def forward(self, x):
        """Return (N, num_classes) logits for a (N, 1, H, W) batch (H*W == 2000)."""
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.reshape(out.size(0), -1)  # flatten per-sample for the FC head
        out = self.fc1(out)
        out = self.fc2(out)
        return out
OUTPUT:
Epoch [11/20], Step [3800/4010], Loss: 0.0124
Epoch [11/20], Step [3900/4010], Loss: 0.4609
Epoch [11/20], Step [4000/4010], Loss: 0.0170
Train Accuracy : 95 %
Validation Accuracy : 24 %

Skeleton 2:
filter 4-8:
Epoch [20/20], Step [4000/4010], Loss: 0.0000
Train Accuracy : 94 %
Validation Accuracy : 23 %

Skeleton 3:
with dropout of 0.3 after each layer:
Epoch [20/20], Step [4000/4010], Loss: 0.0538
Train Accuracy : 63 %
Validation Accuracy : 23 %