
This is a snippet, mostly provided by @ptrblck on the PyTorch forum, for data augmentation of some of the images.

Since the task is segmentation, I think I need to augment both the images and their corresponding masks.
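
My understanding of the idea (just a rough sketch of my own, with made-up file names and parameter ranges) is that one set of random parameters is sampled once and then applied identically to the image and to its mask through the functional API, roughly like this:

import torchvision.transforms as transforms
import torchvision.transforms.functional as tf
from PIL import Image

# hypothetical file names, only for illustration
image = Image.open('image_001.png')
mask = Image.open('image_labeled_001.png')

# sample one set of random affine parameters:
# (degrees range, translate, scale range, shear, image size)
params = transforms.RandomAffine.get_params((-10, 10), None, (0.9, 1.1), None, image.size)

# apply exactly the same parameters to the image and to its mask
image = tf.affine(image, *params)
mask = tf.affine(mask, *params)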

How can I display a few images and their corresponding masks after the transformation, to see what they look like?
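
What I would like to end up with is something like the sketch below (just my guess at how it could look, assuming the dataset object created at the end of the script returns an image tensor of shape [1, H, W] and a mask tensor, as in the script further down):

import matplotlib.pyplot as plt

# dataset as created at the bottom of the script below
t_image, mask, image_path, target_path = dataset[0]

fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, sharex=True, sharey=True, figsize=(6, 6))
ax1.imshow(t_image.squeeze().numpy(), cmap='gray')  # drop the singleton channel dimension
ax1.axis('off')
ax2.imshow(mask.squeeze().numpy())
ax2.axis('off')
plt.show()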

Here is the script:

import torch
from torch.utils.data.dataset import Dataset  # For custom data-sets
import torchvision.transforms as transforms
import torchvision.transforms.functional as tf
from PIL import Image
import numpy 
import glob
import matplotlib.pyplot as plt
from split_dataset import test_loader
import os

class CustomDataset(Dataset):
    def __init__(self, image_paths, target_paths, transform_images, transform_masks):

        self.image_paths = image_paths
        self.target_paths = target_paths

        self.transform_images = transform_images
        self.transform_masks = transform_masks

        self.transformm = transforms.Compose([transforms.Lambda(lambda x: tf.rotate(x, 10)),
                                              transforms.Lambda(lambda x: tf.affine(x, angle=0,
                                                                                    translate=(0, 0),
                                                                                    scale=0.2,
                                                                                    shear=0.2))
                                              ])

        self.transform = transforms.ToTensor()

        self.mapping = {
            0: 0,
            255: 1
        }

    def mask_to_class(self, mask):
        for k in self.mapping:
            mask[mask == k] = self.mapping[k]
        return mask

    def __getitem__(self, index):

        image = Image.open(self.image_paths[index])
        mask = Image.open(self.target_paths[index])

        if any([img in self.image_paths[index] for img in self.transform_images]):
            print('applying special transformation')
            image = self.transformm(image)  # augmentation

        if any([msk in self.target_paths[index] for msk in self.transform_masks]):
            print('applying special transformation')
            mask = self.transformm(mask)  # augmentation

        t_image = image.convert('L')
        t_image = self.transform(t_image)  # transform to tensor for image
        mask = self.transform(mask)        # transform to tensor for mask

        mask = torch.from_numpy(numpy.array(mask, dtype=numpy.uint8))
        mask = self.mask_to_class(mask)
        mask = mask.long()

        return t_image, mask, self.image_paths[index], self.target_paths[index]

    def __len__(self):  # return count of samples we have
        return len(self.image_paths)


image_paths = glob.glob("D:\\Neda\\Pytorch\\U-net\\my_data\\imagesResized\\*.png")
target_paths = glob.glob("D:\\Neda\\Pytorch\\U-net\\my_data\\labelsResized\\*.png")


transform_images = ['image_981.png', 'image_982.png','image_983.png', 'image_984.png', 'image_985.png',
                    'image_986.png','image_987.png','image_988.png','image_989.png','image_990.png',
                    'image_991.png']  # apply special transformation only on these images
print(transform_images)
#['image_991.png', 'image_991.png']

transform_masks = ['image_labeled_981.png', 'image_labeled_982.png','image_labeled_983.png', 'image_labeled_984.png',
                    'image_labeled_985.png', 'image_labeled_986.png','image_labeled_987.png','image_labeled_988.png',
                    'image_labeled_989.png','image_labeled_990.png',
                    'image_labeled_991.png'] 

dataset = CustomDataset(image_paths, target_paths, transform_images, transform_masks)

for transform_images in dataset:

    #print(transform_images)        
    transform_images = Image.open(os.path.join(image_paths, transform_images))
    transform_images = numpy.array(transform_images)

    transform_masks = Image.open(os.path.join(target_paths, transform_masks))
    transform_masks = numpy.array(transform_masks)


    fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=1, sharex=True, sharey=True, figsize = (6,6))

    img1 = ax1.imshow(transform_images, cmap='gray')
    ax1.axis('off')   

    img2 = ax2.imshow(transform_masks)
    ax1.axis('off')        
    plt.show() 

Now I am getting this error:

path = os.fspath(path)
TypeError: expected str, bytes or os.PathLike object, not tuple
