Jelajahi Sumber

Imports and requirements.txt simplified

Lars 2 tahun lalu
induk
melakukan
c012ace4ff
2 mengubah file dengan 5 tambahan dan 19 penghapusan
  1. +1 −6
      requirements.txt
  2. +4 −13
      src/predictor.py

+ 1 - 6
requirements.txt

@@ -5,9 +5,7 @@ filelock==3.13.1
 fsspec==2023.10.0
 gunicorn==21.2.0
 idna==3.4
-imageio==2.31.6
 Jinja2==3.1.2
-lazy_loader==0.3
 MarkupSafe==2.1.3
 mpmath==1.3.0
 networkx==3.2.1
@@ -25,12 +23,9 @@ nvidia-nccl-cu12==2.18.1
 nvidia-nvjitlink-cu12==12.3.52
 nvidia-nvtx-cu12==12.1.105
 packaging==23.2
-Pillow==10.0.1
+Pillow==10.1.0
 requests==2.31.0
-scikit-image==0.22.0
-scipy==1.11.3
 sympy==1.12
-tifffile==2023.9.26
 torch==2.1.0
 torchvision==0.16.0
 triton==2.1.0

+ 4 - 13
src/predictor.py

@@ -1,14 +1,10 @@
 
 
-import numpy as np
-
 import torch
 import torchvision.transforms as transforms
 import torch.nn.init
 
 from PIL import Image
-from skimage import transform, util
-from typing import Tuple
 
 keep_prob = 0.9 
 n_classes = 8  # multi hot encoded
@@ -83,16 +79,11 @@ class Predictor():
         self.cnn.eval()
 
     def predict(self, input_img:Image) -> Prediction:
-        im_gray = np.array(input_img)
-
-        # resizing to 44 x 44 pixels. Model is trained on 44 x 44 pixel images
-        resize_im = (44, 44)
-        im_gray = util.img_as_ubyte(transform.resize(im_gray, resize_im, order = 1, anti_aliasing = False))
-
-        # convert uint8 to some PIL image format, values range 0..1, float32 (float64 vil give error), kan sikkert også gøres på andre måder
-        image = Image.fromarray(im_gray)  # np.shape(image) = (44,44)
+        
+        # Resize to 44 x 44 pixels, and convert to grayscale (if not already). Model is trained on 44 x 44 pixel images
+        img = input_img.resize((44, 44)).convert("L")
 
-        im_tensor = transforms.ToTensor()(image).unsqueeze(0)  # unsqueeze(0) giver [1,44,44] -> [1,1,44,44]
+        im_tensor = transforms.ToTensor()(img).unsqueeze(0)  # unsqueeze(0) giver [1,44,44] -> [1,1,44,44]
 
         # print(np.shape(im_tensor)) # = [1,1,44,44] det er det format som modellen tager som input.  [n_batch, channel, imx, imy]
         # Hvis batch, n af flere billeder på en gang skal det pakkes i formatet [n,1,44,44]