import os

import torch
import torchvision
from d2l import torch as d2l
  voc_dir = "./dataset/VOC2012/"
def read_voc_images(voc_dir, is_train=True):
    """Read all VOC feature images and their segmentation labels."""
    txt_fname = os.path.join(voc_dir, 'ImageSets', 'Segmentation',
                             'train.txt' if is_train else 'val.txt')
    mode = torchvision.io.image.ImageReadMode.RGB
    with open(txt_fname, 'r') as f:
        images = f.read().split()
    features, labels = [], []
    for i, fname in enumerate(images):
        features.append(torchvision.io.read_image(
            os.path.join(voc_dir, 'JPEGImages', f'{fname}.jpg')))
        labels.append(torchvision.io.read_image(
            os.path.join(voc_dir, 'SegmentationClass', f'{fname}.png'), mode))
    return features, labels
  train_features, train_labels = read_voc_images(voc_dir, True)
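# A quick visual check (optional): plot the first five input images above
# their segmentation labels. The labels are RGB images in which each color
# marks one class.
imgs = train_features[:5] + train_labels[:5]
imgs = [img.permute(1, 2, 0) for img in imgs]
d2l.show_images(imgs, 2, 5)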
# RGB color used to paint each of the 21 VOC classes in the label images
VOC_COLORMAP = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0],
                [0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128],
                [64, 0, 0], [192, 0, 0], [64, 128, 0], [192, 128, 0],
                [64, 0, 128], [192, 0, 128], [64, 128, 128], [192, 128, 128],
                [0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0],
                [0, 64, 128]]
VOC_CLASSES = ['background', 'aeroplane', 'bicycle', 'bird', 'boat',
               'bottle', 'bus', 'car', 'cat', 'chair', 'cow',
               'diningtable', 'dog', 'horse', 'motorbike', 'person',
               'potted plant', 'sheep', 'sofa', 'train', 'tv/monitor']
def voc_colormap2label():
    """Build the mapping from RGB colors to VOC class indices."""
    colormap2label = torch.zeros(256 ** 3, dtype=torch.long)
    for i, colormap in enumerate(VOC_COLORMAP):
        colormap2label[
            (colormap[0] * 256 + colormap[1]) * 256 + colormap[2]] = i
    return colormap2label
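# Sanity check of the mapping (illustrative only; `cm2l` is a local name
# introduced just for this check): black (0, 0, 0) is the background and
# should map to 0, while (128, 0, 0) is 'aeroplane' and should map to 1.
cm2l = voc_colormap2label()
print(cm2l[(0 * 256 + 0) * 256 + 0], cm2l[(128 * 256 + 0) * 256 + 0])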
 
def voc_label_indices(colormap, colormap2label):
    """Map the RGB values in a VOC label image to their class indices."""
    colormap = colormap.permute(1, 2, 0).numpy().astype('int32')
    idx = ((colormap[:, :, 0] * 256 + colormap[:, :, 1]) * 256
           + colormap[:, :, 2])
    return colormap2label[idx]
  y = voc_label_indices(train_labels[0], voc_colormap2label())
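# In the d2l walkthrough this patch of the first example falls on the
# aeroplane, so the printed indices should be 1, i.e. VOC_CLASSES[1].
print(y[105:115, 130:140], VOC_CLASSES[1])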
def voc_rand_crop(feature, label, height, width):
    """Randomly crop the feature and label images to the same region."""
    # Sample one crop rectangle and apply it to both tensors so the
    # pixel-to-class correspondence is preserved.
    rect = torchvision.transforms.RandomCrop.get_params(
        feature, (height, width))
    feature = torchvision.transforms.functional.crop(feature, *rect)
    label = torchvision.transforms.functional.crop(label, *rect)
    return feature, label
n = 5  # number of random crops to draw
imgs = []
for _ in range(n):
    imgs += voc_rand_crop(train_features[0], train_labels[0], 200, 300)
 
imgs = [img.permute(1, 2, 0) for img in imgs]
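# Display the n crops: feature crops in the first row, the matching label
# crops in the second (imgs alternates feature, label, feature, ...).
d2l.show_images(imgs[::2] + imgs[1::2], 2, n)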
class VOCSegDataset(torch.utils.data.Dataset):
    """A custom dataset for loading the VOC segmentation dataset."""

    def __init__(self, is_train, crop_size, voc_dir):
        self.transform = torchvision.transforms.Normalize(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        self.crop_size = crop_size
        features, labels = read_voc_images(voc_dir, is_train=is_train)
        self.features = [self.normalize_image(feature)
                         for feature in self.filter(features)]
        self.labels = self.filter(labels)
        self.colormap2label = voc_colormap2label()
        print('read ' + str(len(self.features)) + ' examples')

    def normalize_image(self, img):
        return self.transform(img.float() / 255.)

    def filter(self, imgs):
        # Keep only images at least as large as the crop size, since a
        # random crop cannot extend past the image boundary.
        return [img for img in imgs if (
            img.shape[1] >= self.crop_size[0] and
            img.shape[2] >= self.crop_size[1])]

    def __getitem__(self, idx):
        feature, label = voc_rand_crop(self.features[idx], self.labels[idx],
                                       *self.crop_size)
        return (feature, voc_label_indices(label, self.colormap2label))

    def __len__(self):
        return len(self.features)
crop_size = (320, 480)
voc_train = VOCSegDataset(True, crop_size, voc_dir)
voc_test = VOCSegDataset(False, crop_size, voc_dir)
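# Fetch one example to confirm the shapes: the feature is a normalized
# 3-channel crop, the label a 2-D tensor of class indices of the same
# spatial size. (Illustrative check, not needed for training.)
feature, label = voc_train[0]
print(feature.shape, label.shape)  # (3, 320, 480) and (320, 480)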
batch_size = 64
train_iter = torch.utils.data.DataLoader(
    voc_train, batch_size, shuffle=True, drop_last=True,
    num_workers=d2l.get_dataloader_workers())
for X, Y in train_iter:
    print(X.shape)
    print(Y.shape)
    break
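# Optional convenience wrapper bundling the steps above. This is a local
# sketch modeled on d2l.load_data_voc, except that it reads from the local
# voc_dir instead of downloading the dataset.
def load_data_voc(batch_size, crop_size):
    """Return training and test DataLoaders for VOC semantic segmentation."""
    num_workers = d2l.get_dataloader_workers()
    train_iter = torch.utils.data.DataLoader(
        VOCSegDataset(True, crop_size, voc_dir), batch_size, shuffle=True,
        drop_last=True, num_workers=num_workers)
    test_iter = torch.utils.data.DataLoader(
        VOCSegDataset(False, crop_size, voc_dir), batch_size,
        drop_last=True, num_workers=num_workers)
    return train_iter, test_iter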
 