@@ -1,14 +1,10 @@
 
 
-import numpy as np
-
 import torch
 import torchvision.transforms as transforms
 import torch.nn.init
 
 from PIL import Image
-from skimage import transform, util
-from typing import Tuple
 
 keep_prob = 0.9
 n_classes = 8 # multi hot encoded
@@ -83,16 +79,11 @@ class Predictor():
         self.cnn.eval()
 
     def predict(self, input_img:Image) -> Prediction:
-        im_gray = np.array(input_img)
-
-        # resizing to 44 x 44 pixels. Model is trained on 44 x 44 pixel images
-        resize_im = (44, 44)
-        im_gray = util.img_as_ubyte(transform.resize(im_gray, resize_im, order = 1, anti_aliasing = False))
-
-        # convert uint8 to a PIL image format, values range 0..1, float32 (float64 will give an error); can probably also be done in other ways
-        image = Image.fromarray(im_gray) # np.shape(image) = (44,44)
+
+        # Resize to 44 x 44 pixels, and convert to grayscale (if not already). Model is trained on 44 x 44 pixel images
+        img = input_img.resize((44, 44)).convert("L")
 
-        im_tensor = transforms.ToTensor()(image).unsqueeze(0) # unsqueeze(0) gives [1,44,44] -> [1,1,44,44]
+        im_tensor = transforms.ToTensor()(img).unsqueeze(0) # unsqueeze(0) gives [1,44,44] -> [1,1,44,44]
 
         # print(np.shape(im_tensor)) # = [1,1,44,44]; this is the format the model takes as input: [n_batch, channel, imx, imy]
         # For a batch of n images at once, they must be packed in the format [n,1,44,44]
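
Appended note (not part of the patch): a minimal, self-contained sketch of the preprocessing path introduced above (PIL resize + grayscale, ToTensor, unsqueeze), plus how a batch could be packed into the [n,1,44,44] format mentioned in the comments. The helper name to_model_tensor and the file names in the batching comment are illustrative only; the Predictor/Prediction classes and model loading are outside this hunk.

import torch
import torchvision.transforms as transforms
from PIL import Image


def to_model_tensor(img: Image.Image) -> torch.Tensor:
    # Same preprocessing as the new predict(): resize to 44 x 44 and
    # force single-channel grayscale; ToTensor scales to float32 in [0, 1].
    img = img.resize((44, 44)).convert("L")
    # ToTensor gives shape [1, 44, 44]; unsqueeze(0) adds the batch
    # dimension -> [1, 1, 44, 44], i.e. [n_batch, channel, imx, imy].
    return transforms.ToTensor()(img).unsqueeze(0)


# Packing several images into one batch of shape [n, 1, 44, 44]
# ("a.png" and "b.png" are placeholder file names):
# batch = torch.cat([to_model_tensor(Image.open(p)) for p in ("a.png", "b.png")], dim=0)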