Visualization of Machine Learning

# Instantiating a model from an input tensor and a list of output tensors
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf

layer_outputs = [layer.output for layer in ArcNet.layers[:]]   # Extracts the outputs of all layers
activation_model = tf.keras.models.Model(inputs=ArcNet.input, outputs=layer_outputs)   # Creates a model that will return these outputs, given the model input

i = 105   # Put the sample number to visualize the last conv layer output

# Running the model in predict mode
test_one       = test[i:i+1, :]
test_label_one = test_label[i:i+1]
# print(test_one)

# Converting the label (y value) into categorical (one-hot) data over 8 classes
OneHot_test_label_one = tf.keras.utils.to_categorical(test_label_one, num_classes=8)   # np_utils.to_categorical in older standalone Keras
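As a quick sanity check before running the activation model, the selected sample and its one-hot label can be inspected; a minimal sketch, assuming `test` holds one sample per row and the model has 8 output classes:

print(test_one.shape)                # expected: (1, signal_length)
print(test_label_one)                # the integer class label of sample i
print(OneHot_test_label_one.shape)   # expected: (1, 8) for 8 classes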
input_tensor = test_one.reshape(test_one.shape[0], test_one.shape[1], 1)   # Input data in the 1D (samples, length, channels) format the model expects
activations = activation_model.predict(input_tensor)   # Returns a list of NumPy arrays, one per layer activation
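To see which array in `activations` corresponds to which layer, the shape of each activation can be printed alongside its layer name; a minimal sketch, assuming `ArcNet` and `activations` as defined above:

for name, act in zip([layer.name for layer in ArcNet.layers], activations):
    print(name, act.shape)   # for a Conv1D layer this is (1, length, n_filters)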
nth_layer_activation = activations[7]
print(nth_layer_activation.shape)
# plt.matshow(nth_layer_activation[0, :, 4], cmap='viridis')
pred = ArcNet.predict_classes(input_tensor)
print(pred)
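Note that `predict_classes` exists only on `Sequential` models in older TensorFlow/Keras releases and has since been removed; an equivalent for current versions, assuming the final layer is a softmax over the 8 classes, is:

pred = np.argmax(ArcNet.predict(input_tensor), axis=-1)
print(pred)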
label = test_label[i]
plt.plot(test[i])
# print(test[285].shape)
plt.xlabel(str(i) + ": Labeled as [" + str(label) + "], Predicted as " + str(pred))
plt.show()
# print(test[3773])
# Visualizing every channel in every intermediate activation
# Say ArcNet is your built deep learning model for which you want to see the output of different layers
layer_names = []
for layer in ArcNet.layers[:8]:
    layer_names.append(layer.name)   # Names of the layers, so you can have them as part of your plot

images_per_row = 32
for layer_name, layer_activation in zip(layer_names, activations):   # Displays the feature maps
    n_features = layer_activation.shape[-1]   # Number of features in the feature map
    size = layer_activation.shape[1]          # The feature map has shape (1, size, n_features)
    n_cols = n_features // images_per_row     # Tiles the activation channels in this matrix
    display_grid = np.zeros((size * n_cols, images_per_row * size))
    for col in range(n_cols):                 # Tiles each filter into a big horizontal grid
        for row in range(images_per_row):
            channel_image = layer_activation[0, :, col * images_per_row + row]
            channel_image -= channel_image.mean()   # Post-processes the feature to make it visually palatable
            channel_image /= channel_image.std()
            channel_image *= 64
            channel_image += 128
            channel_image = np.clip(channel_image, 0, 255).astype('uint8')
            display_grid[col * size : (col + 1) * size,   # Displays the grid
                         row * size : (row + 1) * size] = channel_image
    scale = 1. / size
    plt.figure(figsize=(scale * display_grid.shape[1],
                        scale * display_grid.shape[0]))
    plt.title(layer_name)
    plt.grid(False)
    plt.imshow(display_grid, aspect='auto', cmap='viridis')
plt.show()
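Because the activations of this 1D model are time-series feature maps, a few channels of a single layer can also be drawn as line plots, which is sometimes easier to read than the tiled image grid. A minimal sketch, assuming `activation_model` and `input_tensor` from above; `layer_idx` is an arbitrary choice for illustration:

# Re-running predict here because the tiling loop above normalizes the activation arrays in place
acts = activation_model.predict(input_tensor)
layer_idx = 7                               # assumption: the layer index to inspect
act = acts[layer_idx][0]                    # shape (length, n_channels)
for ch in range(min(4, act.shape[-1])):     # plot the first few channels
    plt.plot(act[:, ch], label='channel ' + str(ch))
plt.title(ArcNet.layers[layer_idx].name)
plt.legend()
plt.show()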

# Part of the output of the last layers produced by the code above is printed below.