
data_processing.py 4.4 kB

# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os

import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as CV
import mindspore.dataset.transforms.c_transforms as C
from mindspore.dataset.vision import Inter
import mindspore.common.dtype as mstype


def generate_mnist_dataset(data_path, batch_size=32, repeat_size=1,
                           num_parallel_workers=1, sparse=True):
    """
    Create dataset for training or testing.
    """
    # define dataset
    ds1 = ds.MnistDataset(data_path)

    # define operation parameters
    resize_height, resize_width = 32, 32
    rescale = 1.0 / 255.0
    shift = 0.0

    # define map operations
    resize_op = CV.Resize((resize_height, resize_width),
                          interpolation=Inter.LINEAR)
    rescale_op = CV.Rescale(rescale, shift)
    hwc2chw_op = CV.HWC2CHW()
    type_cast_op = C.TypeCast(mstype.int32)

    # apply map operations on images
    if not sparse:
        one_hot_enco = C.OneHot(10)
        ds1 = ds1.map(input_columns="label", operations=one_hot_enco,
                      num_parallel_workers=num_parallel_workers)
        type_cast_op = C.TypeCast(mstype.float32)
    ds1 = ds1.map(input_columns="label", operations=type_cast_op,
                  num_parallel_workers=num_parallel_workers)
    ds1 = ds1.map(input_columns="image", operations=resize_op,
                  num_parallel_workers=num_parallel_workers)
    ds1 = ds1.map(input_columns="image", operations=rescale_op,
                  num_parallel_workers=num_parallel_workers)
    ds1 = ds1.map(input_columns="image", operations=hwc2chw_op,
                  num_parallel_workers=num_parallel_workers)

    # apply DatasetOps
    buffer_size = 10000
    ds1 = ds1.shuffle(buffer_size=buffer_size)
    ds1 = ds1.batch(batch_size, drop_remainder=True)
    ds1 = ds1.repeat(repeat_size)

    return ds1


def vgg_create_dataset100(data_home, image_size, batch_size, rank_id=0, rank_size=1, repeat_num=1,
                          training=True, num_samples=None, shuffle=True):
    """Data operations."""
    ds.config.set_seed(1)
    data_dir = os.path.join(data_home, "train")
    if not training:
        data_dir = os.path.join(data_home, "test")

    if num_samples is not None:
        data_set = ds.Cifar100Dataset(data_dir, num_shards=rank_size, shard_id=rank_id,
                                      num_samples=num_samples, shuffle=shuffle)
    else:
        data_set = ds.Cifar100Dataset(data_dir, num_shards=rank_size, shard_id=rank_id)

    input_columns = ["fine_label"]
    output_columns = ["label"]
    data_set = data_set.rename(input_columns=input_columns, output_columns=output_columns)
    data_set = data_set.project(["image", "label"])

    rescale = 1.0 / 255.0
    shift = 0.0

    # define map operations
    random_crop_op = CV.RandomCrop((32, 32), (4, 4, 4, 4))  # padding_mode default CONSTANT
    random_horizontal_op = CV.RandomHorizontalFlip()
    resize_op = CV.Resize(image_size)  # interpolation default BILINEAR
    rescale_op = CV.Rescale(rescale, shift)
    normalize_op = CV.Normalize((0.4465, 0.4822, 0.4914), (0.2010, 0.1994, 0.2023))
    changeswap_op = CV.HWC2CHW()
    type_cast_op = C.TypeCast(mstype.int32)

    c_trans = []
    if training:
        c_trans = [random_crop_op, random_horizontal_op]
    c_trans += [resize_op, rescale_op, normalize_op, changeswap_op]

    # apply map operations on images
    data_set = data_set.map(input_columns="label", operations=type_cast_op)
    data_set = data_set.map(input_columns="image", operations=c_trans)

    # apply repeat operations
    data_set = data_set.repeat(repeat_num)

    # apply shuffle operations
    # data_set = data_set.shuffle(buffer_size=1000)

    # apply batch operations
    data_set = data_set.batch(batch_size=batch_size, drop_remainder=True)

    return data_set
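
A minimal usage sketch of the two helpers above. The data paths are placeholders and assume MNIST and CIFAR-100 have already been downloaded in the directory layout expected by MnistDataset and Cifar100Dataset; the iterator call assumes a MindSpore release that provides create_dict_iterator.

# Hypothetical usage sketch; "./MNIST/train" and "./cifar-100-binary" are placeholder paths.
mnist_ds = generate_mnist_dataset("./MNIST/train", batch_size=32, sparse=True)
for batch in mnist_ds.create_dict_iterator():
    images, labels = batch["image"], batch["label"]
    break  # inspect a single batch of shape (32, 1, 32, 32)

cifar_ds = vgg_create_dataset100("./cifar-100-binary", image_size=(224, 224),
                                 batch_size=64, training=True)
print(cifar_ds.get_dataset_size())  # number of batches per epoch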

MindArmour focuses on the security and privacy of AI. It aims to strengthen the security and trustworthiness of models and to protect users' data privacy. It consists of three main modules: the adversarial example robustness module, the fuzz testing module, and the privacy protection and evaluation module. The adversarial example robustness module evaluates a model's robustness against adversarial examples and provides model-hardening methods that improve the model's resistance to adversarial attacks. It contains four submodules: adversarial example generation, adversarial example detection, model defense, and attack/defense evaluation.
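
A minimal sketch of how the adversarial example generation submodule might be combined with the dataset helper above. Assumptions: a MindArmour release that exposes FastGradientSignMethod under mindarmour.adv_robustness.attacks (the import path differs across older releases), a trained classification network `net` that returns logits, and a MindSpore version whose create_dict_iterator accepts output_numpy.

# Sketch only; `net` is assumed to be a trained MindSpore network returning logits.
import numpy as np
from mindspore import Tensor
from mindspore.nn import SoftmaxCrossEntropyWithLogits
from mindarmour.adv_robustness.attacks import FastGradientSignMethod

loss_fn = SoftmaxCrossEntropyWithLogits(sparse=True)
attack = FastGradientSignMethod(net, eps=0.3, loss_fn=loss_fn)

test_ds = generate_mnist_dataset("./MNIST/test", batch_size=32, sparse=True)
for batch in test_ds.create_dict_iterator(output_numpy=True):
    images, labels = batch["image"], batch["label"]
    adv_images = attack.generate(images, labels)               # perturbed inputs
    adv_preds = net(Tensor(adv_images)).asnumpy().argmax(axis=1)
    print("accuracy on adversarial batch:", np.mean(adv_preds == labels))
    break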