
fix testcases, add testcase for cpu

tags/v1.6.0
shu-kun-zhang 3 years ago
commit bdb1259c8d
19 changed files with 1182 additions and 471 deletions
  1. .gitignore (+1, -0)
  2. tests/st/resnet50/resnet_cifar10.py (+0, -307)
  3. tests/st/resnet50/test_cifar10_attack_fgsm.py (+0, -75)
  4. tests/ut/python/adv_robustness/attacks/black/test_hsja.py (+92, -4)
  5. tests/ut/python/adv_robustness/attacks/black/test_nes.py (+108, -6)
  6. tests/ut/python/adv_robustness/attacks/black/test_pointwise_attack.py (+46, -3)
  7. tests/ut/python/adv_robustness/attacks/black/test_salt_and_pepper_attack.py (+37, -3)
  8. tests/ut/python/adv_robustness/attacks/test_batch_generate_attack.py (+62, -6)
  9. tests/ut/python/adv_robustness/attacks/test_cw.py (+54, -6)
  10. tests/ut/python/adv_robustness/attacks/test_deep_fool.py (+127, -9)
  11. tests/ut/python/adv_robustness/attacks/test_iterative_gradient_method.py (+178, -12)
  12. tests/ut/python/adv_robustness/attacks/test_lbfgs.py (+43, -3)
  13. tests/ut/python/adv_robustness/detectors/black/test_similarity_detector.py (+52, -4)
  14. tests/ut/python/adv_robustness/detectors/test_ensemble_detector.py (+66, -4)
  15. tests/ut/python/adv_robustness/detectors/test_mag_net.py (+134, -10)
  16. tests/ut/python/adv_robustness/detectors/test_region_based_detector.py (+87, -5)
  17. tests/ut/python/adv_robustness/detectors/test_spatial_smoothing.py (+81, -5)
  18. tests/ut/python/reliability/concept_drift/test_concept_drift_images.py (+8, -7)
  19. tests/ut/python/reliability/concept_drift/test_concept_drift_time_series.py (+6, -2)
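The test files below all receive the same refactoring: the module-level context.set_context(device_target="Ascend") call is removed, the existing test is renamed with an _ascend suffix and gains a CPU twin marked with pytest.mark.platform_x86_cpu, the device target is set inside each test body, and docstrings are rewritten in the Feature/Description/Expectation format. A minimal sketch of that pattern, assuming an illustrative test name and a placeholder run_attack helper standing in for a real attack's generate call:

import numpy as np
import pytest
from mindspore import context


def run_attack(inputs):
    """Placeholder for a real attack call, e.g. attack.generate(inputs, labels)."""
    return inputs + 0.1


@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_example_attack_ascend():
    """
    Feature: example attack unit test for ascend
    Description: make sure the adversarial example differs from the input
    Expectation: adv_data != inputs
    """
    # device target is now configured per test instead of at module import time
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    inputs = np.random.random((4, 10)).astype(np.float32)
    adv_data = run_attack(inputs)
    assert np.any(adv_data != inputs)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_example_attack_cpu():
    """
    Feature: example attack unit test for cpu
    Description: make sure the adversarial example differs from the input
    Expectation: adv_data != inputs
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    inputs = np.random.random((4, 10)).astype(np.float32)
    adv_data = run_attack(inputs)
    assert np.any(adv_data != inputs)

The real tests swap run_attack for the specific attack or detector under test and keep their original assertions.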

.gitignore (+1, -0)

@@ -26,3 +26,4 @@ mindarmour.egg-info/
*pre_trained_model/
*__pycache__/
*kernel_meta
.DS_Store

tests/st/resnet50/resnet_cifar10.py (+0, -307)

@@ -1,307 +0,0 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from mindspore import nn
from mindspore.common.tensor import Tensor
from mindspore.ops import operations as P


def variance_scaling_raw(shape):
value = np.random.normal(size=shape).astype(np.float32)
return Tensor(value)


def weight_variable(shape):
value = np.random.normal(size=shape).astype(np.float32)
return Tensor(value)


def sweight_variable(shape):
value = np.random.uniform(size=shape).astype(np.float32)
return Tensor(value)


def weight_variable_0(shape):
zeros = np.zeros(shape).astype(np.float32)
return Tensor(zeros)


def weight_variable_1(shape):
ones = np.ones(shape).astype(np.float32)
return Tensor(ones)


def conv3x3(in_channels, out_channels, stride=1, padding=0):
"""3x3 convolution """
weight_shape = (out_channels, in_channels, 3, 3)
weight = variance_scaling_raw(weight_shape)
return nn.Conv2d(in_channels, out_channels,
kernel_size=3, stride=stride, padding=padding, weight_init=weight, has_bias=False, pad_mode="same")


def conv1x1(in_channels, out_channels, stride=1, padding=0):
"""1x1 convolution"""
weight_shape = (out_channels, in_channels, 1, 1)
weight = variance_scaling_raw(weight_shape)
return nn.Conv2d(in_channels, out_channels,
kernel_size=1, stride=stride, padding=padding, weight_init=weight, has_bias=False, pad_mode="same")


def conv7x7(in_channels, out_channels, stride=1, padding=0):
"""1x1 convolution"""
weight_shape = (out_channels, in_channels, 7, 7)
weight = variance_scaling_raw(weight_shape)
return nn.Conv2d(in_channels, out_channels,
kernel_size=7, stride=stride, padding=padding, weight_init=weight, has_bias=False, pad_mode="same")


def bn_with_initialize(out_channels):
shape = (out_channels)
mean = weight_variable_0(shape)
var = weight_variable_1(shape)
beta = weight_variable_0(shape)
gamma = sweight_variable(shape)
bn = nn.BatchNorm2d(out_channels, momentum=0.99, eps=0.00001, gamma_init=gamma,
beta_init=beta, moving_mean_init=mean, moving_var_init=var)
return bn


def bn_with_initialize_last(out_channels):
shape = (out_channels)
mean = weight_variable_0(shape)
var = weight_variable_1(shape)
beta = weight_variable_0(shape)
gamma = sweight_variable(shape)
bn = nn.BatchNorm2d(out_channels, momentum=0.99, eps=0.00001, gamma_init=gamma,
beta_init=beta, moving_mean_init=mean, moving_var_init=var)
return bn


def fc_with_initialize(input_channels, out_channels):
weight_shape = (out_channels, input_channels)

weight = np.random.normal(size=weight_shape).astype(np.float32)
weight = Tensor(weight)

bias_shape = (out_channels)
bias_value = np.random.uniform(size=bias_shape).astype(np.float32)
bias = Tensor(bias_value)

return nn.Dense(input_channels, out_channels, weight, bias)


class ResidualBlock(nn.Cell):
expansion = 4

def __init__(self,
in_channels,
out_channels,
stride=1):
super(ResidualBlock, self).__init__()

out_chls = out_channels // self.expansion
self.conv1 = conv1x1(in_channels, out_chls, stride=stride, padding=0)
self.bn1 = bn_with_initialize(out_chls)

self.conv2 = conv3x3(out_chls, out_chls, stride=1, padding=0)
self.bn2 = bn_with_initialize(out_chls)

self.conv3 = conv1x1(out_chls, out_channels, stride=1, padding=0)
self.bn3 = bn_with_initialize_last(out_channels)

self.relu = P.ReLU()
self.add = P.Add()

def construct(self, x):
identity = x

out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)

out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)

out = self.conv3(out)
out = self.bn3(out)

out = self.add(out, identity)
out = self.relu(out)

return out


class ResidualBlockWithDown(nn.Cell):
expansion = 4

def __init__(self,
in_channels,
out_channels,
stride=1,
down_sample=False):
super(ResidualBlockWithDown, self).__init__()

out_chls = out_channels // self.expansion
self.conv1 = conv1x1(in_channels, out_chls, stride=stride, padding=0)
self.bn1 = bn_with_initialize(out_chls)

self.conv2 = conv3x3(out_chls, out_chls, stride=1, padding=0)
self.bn2 = bn_with_initialize(out_chls)

self.conv3 = conv1x1(out_chls, out_channels, stride=1, padding=0)
self.bn3 = bn_with_initialize_last(out_channels)

self.relu = P.ReLU()
self.downsample = down_sample

self.conv_down_sample = conv1x1(in_channels, out_channels, stride=stride, padding=0)
self.bn_down_sample = bn_with_initialize(out_channels)
self.add = P.Add()

def construct(self, x):
identity = x

out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)

out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)

out = self.conv3(out)
out = self.bn3(out)

identity = self.conv_down_sample(identity)
identity = self.bn_down_sample(identity)

out = self.add(out, identity)
out = self.relu(out)

return out


class MakeLayer0(nn.Cell):

def __init__(self, block, layer_num, in_channels, out_channels, stride):
super(MakeLayer0, self).__init__()
self.a = ResidualBlockWithDown(in_channels, out_channels, stride=1, down_sample=True)
self.b = block(out_channels, out_channels, stride=stride)
self.c = block(out_channels, out_channels, stride=1)

def construct(self, x):
x = self.a(x)
x = self.b(x)
x = self.c(x)

return x


class MakeLayer1(nn.Cell):

def __init__(self, block, layer_num, in_channels, out_channels, stride):
super(MakeLayer1, self).__init__()
self.a = ResidualBlockWithDown(in_channels, out_channels, stride=stride, down_sample=True)
self.b = block(out_channels, out_channels, stride=1)
self.c = block(out_channels, out_channels, stride=1)
self.d = block(out_channels, out_channels, stride=1)

def construct(self, x):
x = self.a(x)
x = self.b(x)
x = self.c(x)
x = self.d(x)

return x


class MakeLayer2(nn.Cell):

def __init__(self, block, layer_num, in_channels, out_channels, stride):
super(MakeLayer2, self).__init__()
self.a = ResidualBlockWithDown(in_channels, out_channels, stride=stride, down_sample=True)
self.b = block(out_channels, out_channels, stride=1)
self.c = block(out_channels, out_channels, stride=1)
self.d = block(out_channels, out_channels, stride=1)
self.e = block(out_channels, out_channels, stride=1)
self.f = block(out_channels, out_channels, stride=1)

def construct(self, x):
x = self.a(x)
x = self.b(x)
x = self.c(x)
x = self.d(x)
x = self.e(x)
x = self.f(x)

return x


class MakeLayer3(nn.Cell):

def __init__(self, block, layer_num, in_channels, out_channels, stride):
super(MakeLayer3, self).__init__()
self.a = ResidualBlockWithDown(in_channels, out_channels, stride=stride, down_sample=True)
self.b = block(out_channels, out_channels, stride=1)
self.c = block(out_channels, out_channels, stride=1)

def construct(self, x):
x = self.a(x)
x = self.b(x)
x = self.c(x)

return x


class ResNet(nn.Cell):

def __init__(self, block, layer_num, num_classes=100):
super(ResNet, self).__init__()
self.num_classes = num_classes

self.conv1 = conv7x7(3, 64, stride=2, padding=0)

self.bn1 = bn_with_initialize(64)
self.relu = P.ReLU()
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same")

self.layer1 = MakeLayer0(block, layer_num[0], in_channels=64, out_channels=256, stride=1)
self.layer2 = MakeLayer1(block, layer_num[1], in_channels=256, out_channels=512, stride=2)
self.layer3 = MakeLayer2(block, layer_num[2], in_channels=512, out_channels=1024, stride=2)
self.layer4 = MakeLayer3(block, layer_num[3], in_channels=1024, out_channels=2048, stride=2)

self.pool = P.ReduceMean(keep_dims=True)
self.squeeze = P.Squeeze(axis=(2, 3))
self.fc = fc_with_initialize(512*block.expansion, num_classes)

def construct(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)

x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)

x = self.pool(x, (2, 3))
x = self.squeeze(x)
x = self.fc(x)
return x


def resnet50_cifar10(num_classes):
return ResNet(ResidualBlock, [3, 4, 6, 3], num_classes)

tests/st/resnet50/test_cifar10_attack_fgsm.py (+0, -75)

@@ -1,75 +0,0 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Function:
Test fgsm attack about resnet50 network
Usage:
py.test test_cifar10_attack_fgsm.py
"""
import numpy as np

import pytest

from mindspore import Tensor
from mindspore import context
from mindspore.nn import Cell
from mindspore.common import dtype as mstype
from mindspore.ops import operations as P
from mindspore.ops import functional as F

from mindarmour.adv_robustness.attacks import FastGradientSignMethod

from resnet_cifar10 import resnet50_cifar10

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")



class CrossEntropyLoss(Cell):
def __init__(self):
super(CrossEntropyLoss, self).__init__()
self.cross_entropy = P.SoftmaxCrossEntropyWithLogits()
self.mean = P.ReduceMean()
self.one_hot = P.OneHot()
self.on_value = Tensor(1.0, mstype.float32)
self.off_value = Tensor(0.0, mstype.float32)

def construct(self, logits, label):
label = self.one_hot(label, F.shape(logits)[1], self.on_value, self.off_value)
loss = self.cross_entropy(logits, label)[0]
loss = self.mean(loss, (-1,))
return loss


@pytest.mark.level0
@pytest.mark.env_single
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_ascend_inference
def test_fast_gradient_sign_method():
"""
FGSM-Attack test
"""
context.set_context(mode=context.GRAPH_MODE)
# get network
net = resnet50_cifar10(10)

# create test data
test_images = np.random.rand(64, 3, 224, 224).astype(np.float32)
test_labels = np.random.randint(10, size=64).astype(np.int32)
# attacking
loss_fn = CrossEntropyLoss()
attack = FastGradientSignMethod(net, eps=0.1, loss_fn=loss_fn)
adv_data = attack.batch_generate(test_images, test_labels, batch_size=32)
assert np.any(adv_data != test_images)

tests/ut/python/adv_robustness/attacks/black/test_hsja.py (+92, -4)

@@ -26,7 +26,6 @@ from mindarmour.utils.logger import LogUtil
from tests.ut.python.utils.mock_net import Net

context.set_context(mode=context.GRAPH_MODE)
context.set_context(device_target="Ascend")

LOGGER = LogUtil.get_instance()
TAG = 'HopSkipJumpAttack'
@@ -87,10 +86,86 @@ def get_model():
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_hsja_mnist_attack():
def test_hsja_mnist_attack_ascend():
"""
hsja-Attack test
Feature: test HSJA attack for ascend
Description: make sure the HSJA attack works properly
Expectation: predict without any bugs
"""
context.set_context(device_target="Ascend")
current_dir = os.path.dirname(os.path.abspath(__file__))


# get test data
test_images_set = np.load(os.path.join(current_dir,
'../../../dataset/test_images.npy'))
test_labels_set = np.load(os.path.join(current_dir,
'../../../dataset/test_labels.npy'))
# prediction accuracy before attack
model = get_model()
batch_num = 1 # the number of batches of attacking samples
predict_labels = []
i = 0

for img in test_images_set:
i += 1
pred_labels = np.argmax(model.predict(img), axis=1)
predict_labels.append(pred_labels)
if i >= batch_num:
break
predict_labels = np.concatenate(predict_labels)
true_labels = test_labels_set[:batch_num]
accuracy = np.mean(np.equal(predict_labels, true_labels))
LOGGER.info(TAG, "prediction accuracy before attacking is : %s",
accuracy)
test_images = test_images_set[:batch_num]

# attacking
norm = 'l2'
search = 'grid_search'
target = False

attack = HopSkipJumpAttack(model, constraint=norm, stepsize_search=search)
if target:
target_labels = random_target_labels(true_labels)
target_images = create_target_images(test_images_set, test_labels_set,
target_labels)
LOGGER.info(TAG, 'len target labels : %s', len(target_labels))
LOGGER.info(TAG, 'len target_images : %s', len(target_images))
LOGGER.info(TAG, 'len test_images : %s', len(test_images))
attack.set_target_images(target_images)
success_list, adv_data, _ = attack.generate(test_images, target_labels)
else:
success_list, adv_data, _ = attack.generate(test_images, None)
assert (adv_data != test_images).any()

adv_datas = []
gts = []
for success, adv, gt in zip(success_list, adv_data, true_labels):
if success:
adv_datas.append(adv)
gts.append(gt)
if gts:
adv_datas = np.concatenate(np.asarray(adv_datas), axis=0)
gts = np.asarray(gts)
pred_logits_adv = model.predict(adv_datas)
pred_lables_adv = np.argmax(pred_logits_adv, axis=1)
accuracy_adv = np.mean(np.equal(pred_lables_adv, gts))
LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s',
accuracy_adv)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_hsja_mnist_attack_cpu():
"""
Feature: test HSJA attack for cpu
Description: make sure the HSJA attack works properly
Expectation: predict without any bugs
"""
context.set_context(device_target="CPU")
current_dir = os.path.dirname(os.path.abspath(__file__))


@@ -158,7 +233,20 @@ def test_hsja_mnist_attack():
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_value_error():
def test_value_error_ascend():
context.set_context(device_target="Ascend")
model = get_model()
norm = 'l2'
with pytest.raises(ValueError):
assert HopSkipJumpAttack(model, constraint=norm, stepsize_search='bad-search')


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_value_error_cpu():
context.set_context(device_target="CPU")
model = get_model()
norm = 'l2'
with pytest.raises(ValueError):


tests/ut/python/adv_robustness/attacks/black/test_nes.py (+108, -6)

@@ -26,7 +26,6 @@ from mindarmour.utils.logger import LogUtil
from tests.ut.python.utils.mock_net import Net

context.set_context(mode=context.GRAPH_MODE)
context.set_context(device_target="Ascend")

LOGGER = LogUtil.get_instance()
TAG = 'HopSkipJumpAttack'
@@ -165,8 +164,30 @@ def nes_mnist_attack(scene, top_k):
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_nes_query_limit():
def test_nes_query_limit_ascend():
"""
Feature: nes query limited for ascend
Description: make sure the attack in the query limit scene works properly
Expectation: attack without any bugs
"""
# scene is in ['Query_Limit', 'Partial_Info', 'Label_Only']
context.set_context(device_target="Ascend")
scene = 'Query_Limit'
nes_mnist_attack(scene, top_k=-1)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_nes_query_limit_cpu():
"""
Feature: nes query limited for cpu
Description: make sure the attack in the query limit scene works properly
Expectation: attack without any bugs
"""
# scene is in ['Query_Limit', 'Partial_Info', 'Label_Only']
context.set_context(device_target="CPU")
scene = 'Query_Limit'
nes_mnist_attack(scene, top_k=-1)

@@ -176,8 +197,30 @@ def test_nes_query_limit():
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_nes_partial_info():
def test_nes_partial_info_ascend():
"""
Feature: nes partial info for ascend
Description: make sure the attack in the partial info scene works properly
Expectation: attack without any bugs
"""
# scene is in ['Query_Limit', 'Partial_Info', 'Label_Only']
context.set_context(device_target="Ascend")
scene = 'Partial_Info'
nes_mnist_attack(scene, top_k=5)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_nes_partial_info_cpu():
"""
Feature: nes partial info for cpu
Description: make sure the attack in the partial info scene works properly
Expectation: attack without any bugs
"""
# scene is in ['Query_Limit', 'Partial_Info', 'Label_Only']
context.set_context(device_target="CPU")
scene = 'Partial_Info'
nes_mnist_attack(scene, top_k=5)

@@ -187,8 +230,30 @@ def test_nes_partial_info():
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_nes_label_only():
def test_nes_label_only_ascend():
"""
Feature: nes label only for ascend
Description: make sure the attack in the label only scene works properly
Expectation: attack without any bugs
"""
# scene is in ['Query_Limit', 'Partial_Info', 'Label_Only']
context.set_context(device_target="Ascend")
scene = 'Label_Only'
nes_mnist_attack(scene, top_k=5)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_nes_label_only_cpu():
"""
Feature: nes label only for cpu
Description: make sure the attack in the label only scene works properly
Expectation: attack without any bugs
"""
# scene is in ['Query_Limit', 'Partial_Info', 'Label_Only']
context.set_context(device_target="CPU")
scene = 'Label_Only'
nes_mnist_attack(scene, top_k=5)

@@ -198,8 +263,20 @@ def test_nes_label_only():
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_value_error():
def test_value_error_ascend():
"""test that exception is raised for invalid labels"""
context.set_context(device_target="Ascend")
with pytest.raises(ValueError):
assert nes_mnist_attack('Label_Only', -1)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_value_error_cpu():
"""test that exception is raised for invalid labels"""
context.set_context(device_target="CPU")
with pytest.raises(ValueError):
assert nes_mnist_attack('Label_Only', -1)

@@ -209,7 +286,32 @@ def test_value_error():
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_none():
def test_none_ascend():
"""
Feature: nes none for ascend
Description: detect error or works properly
Expectation: detect error or works properly
"""
context.set_context(device_target="Ascend")
current_dir = os.path.dirname(os.path.abspath(__file__))
model = get_model(current_dir)
test_images, test_labels = get_dataset(current_dir)
nes = NES(model, 'Partial_Info')
with pytest.raises(ValueError):
assert nes.generate(test_images, test_labels)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_none_cpu():
"""
Feature: nes none for cpu
Description: detect error or works properly
Expectation: detect error or works properly
"""
context.set_context(device_target="CPU")
current_dir = os.path.dirname(os.path.abspath(__file__))
model = get_model(current_dir)
test_images, test_labels = get_dataset(current_dir)


tests/ut/python/adv_robustness/attacks/black/test_pointwise_attack.py (+46, -3)

@@ -28,7 +28,6 @@ from mindarmour.utils.logger import LogUtil

from tests.ut.python.utils.mock_net import Net

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")

LOGGER = LogUtil.get_instance()
TAG = 'Pointwise_Test'
@@ -53,10 +52,54 @@ class ModelToBeAttacked(BlackModel):
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_pointwise_attack_method():
def test_pointwise_attack_method_ascend():
"""
Pointwise attack method unit test.
Feature: Pointwise attack method unit test for ascend
Description: Given an image, we want to make sure the adversarial example
generated is different from the image
Expectation: input_np != adv_data
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
np.random.seed(123)
# upload trained network
current_dir = os.path.dirname(os.path.abspath(__file__))
ckpt_path = os.path.join(current_dir,
'../../../dataset/trained_ckpt_file/checkpoint_lenet-10_1875.ckpt')
net = Net()
load_dict = load_checkpoint(ckpt_path)
load_param_into_net(net, load_dict)

# get one mnist image
input_np = np.load(os.path.join(current_dir,
'../../../dataset/test_images.npy'))[:3]
labels = np.load(os.path.join(current_dir,
'../../../dataset/test_labels.npy'))[:3]
model = ModelToBeAttacked(net)
pre_label = np.argmax(model.predict(input_np), axis=1)
LOGGER.info(TAG, 'original sample predict labels are :{}'.format(pre_label))
LOGGER.info(TAG, 'true labels are: {}'.format(labels))
attack = PointWiseAttack(model, sparse=True, is_targeted=False)
is_adv, adv_data, _ = attack.generate(input_np, pre_label)
LOGGER.info(TAG, 'adv sample predict labels are: {}'
.format(np.argmax(model.predict(adv_data), axis=1)))

assert np.any(adv_data[is_adv][0] != input_np[is_adv][0]), 'Pointwise attack method: ' \
'generate value must not be equal' \
' to original value.'


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_pointwise_attack_method_cpu():
"""
Feature: Pointwise attack method unit test for cpu
Description: Given an image, we want to make sure the adversarial example
generated is different from the image
Expectation: input_np != adv_data
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
np.random.seed(123)
# upload trained network
current_dir = os.path.dirname(os.path.abspath(__file__))


tests/ut/python/adv_robustness/attacks/black/test_salt_and_pepper_attack.py (+37, -3)

@@ -27,7 +27,6 @@ from mindarmour.adv_robustness.attacks import SaltAndPepperNoiseAttack
from tests.ut.python.utils.mock_net import Net

context.set_context(mode=context.GRAPH_MODE)
context.set_context(device_target="Ascend")


class ModelToBeAttacked(BlackModel):
@@ -48,10 +47,45 @@ class ModelToBeAttacked(BlackModel):
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_salt_and_pepper_attack_method():
def test_salt_and_pepper_attack_method_ascend():
"""
Salt and pepper attack method unit test.
Feature: Salt and pepper attack method unit test for ascend
Description: Given an image, we want to make sure the adversarial example
generated is different from the image
Expectation: inputs != adv_data
"""
context.set_context(device_target="Ascend")
np.random.seed(123)
# upload trained network
current_dir = os.path.dirname(os.path.abspath(__file__))
ckpt_path = os.path.join(current_dir,
'../../../dataset/trained_ckpt_file/checkpoint_lenet-10_1875.ckpt')
net = Net()
load_dict = load_checkpoint(ckpt_path)
load_param_into_net(net, load_dict)

# get one mnist image
inputs = np.load(os.path.join(current_dir, '../../../dataset/test_images.npy'))[:3]
labels = np.load(os.path.join(current_dir, '../../../dataset/test_labels.npy'))[:3]
model = ModelToBeAttacked(net)

attack = SaltAndPepperNoiseAttack(model, sparse=True)
_, adv_data, _ = attack.generate(inputs, labels)
assert np.any(adv_data[0] != inputs[0]), 'Salt and pepper attack method: generate value must not be equal' \
' to original value.'

@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_salt_and_pepper_attack_method_cpu():
"""
Feature: Salt and pepper attack method unit test for cpu
Description: Given an image, we want to make sure the adversarial example
generated is different from the image
Expectation: inputs != adv_data
"""
context.set_context(device_target="CPU")
np.random.seed(123)
# upload trained network
current_dir = os.path.dirname(os.path.abspath(__file__))


tests/ut/python/adv_robustness/attacks/test_batch_generate_attack.py (+62, -6)

@@ -25,8 +25,6 @@ from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
from mindarmour.adv_robustness.attacks import FastGradientMethod


context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")


# for user
class Net(Cell):
@@ -114,10 +112,36 @@ class GradWrapWithLoss(Cell):
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_batch_generate_attack():
def test_batch_generate_attack_ascend():
"""
Feature: Attack with batch-generate for ascend
Description: Given an image, we want to make sure the adversarial example
generated is different from the image
Expectation: input_np != ms_adv_x
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
input_np = np.random.random((128, 10)).astype(np.float32)
label = np.random.randint(0, 10, 128).astype(np.int32)
label = np.eye(10)[label].astype(np.float32)

attack = FastGradientMethod(Net(), loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
ms_adv_x = attack.batch_generate(input_np, label, batch_size=32)

assert np.any(ms_adv_x != input_np), 'Fast gradient method: generate value' \
' must not be equal to original value.'

@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_batch_generate_attack_cpu():
"""
Attack with batch-generate.
Feature: Attack with batch-generate for cpu
Description: Given an image, we want to make sure the adversarial example
generated is different from the image
Expectation: input_np != ms_adv_x
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
input_np = np.random.random((128, 10)).astype(np.float32)
label = np.random.randint(0, 10, 128).astype(np.int32)
label = np.eye(10)[label].astype(np.float32)
@@ -129,14 +153,18 @@ def test_batch_generate_attack():
' must not be equal to original value.'



@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_batch_generate_attack_multi_inputs():
def test_batch_generate_attack_multi_inputs_ascend():
"""
Attack with batch-generate by multi-inputs.
Feature: Attack with batch-generate by multi-inputs for ascend
Description: Given multiple images, we want to make sure the adversarial examples
generated are different from the images
Expectation: inputs != ms_adv_x
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
inputs1 = np.random.random((128, 10)).astype(np.float32)
@@ -153,3 +181,31 @@ def test_batch_generate_attack_multi_inputs():

assert np.any(ms_adv_x != inputs1), 'Fast gradient method: generate value' \
' must not be equal to original value.'


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_batch_generate_attack_multi_inputs_cpu():
"""
Feature: Attack with batch-generate by multi-inputs for cpu
Description: Given multiple images, we want to make sure the adversarial examples
generated are different from the images
Expectation: inputs != ms_adv_x
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
inputs1 = np.random.random((128, 10)).astype(np.float32)
inputs2 = np.random.random((128, 10)).astype(np.float32)
labels1 = np.random.randint(0, 10, 128).astype(np.int32)
labels2 = np.random.randint(0, 10, 128).astype(np.int32)
labels1 = np.eye(10)[labels1].astype(np.float32)
labels2 = np.eye(10)[labels2].astype(np.float32)

with_loss_cell = WithLossCell(Net2(), LossNet())
grad_with_loss_net = GradWrapWithLoss(with_loss_cell)
attack = FastGradientMethod(grad_with_loss_net)
ms_adv_x = attack.batch_generate((inputs1, inputs2), (labels1, labels2), batch_size=32)

assert np.any(ms_adv_x != inputs1), 'Fast gradient method: generate value' \
' must not be equal to original value.'

tests/ut/python/adv_robustness/attacks/test_cw.py (+54, -6)

@@ -24,8 +24,6 @@ from mindspore import context
from mindarmour.adv_robustness.attacks import CarliniWagnerL2Attack


context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")


# for user
class Net(Cell):
@@ -59,10 +57,35 @@ class Net(Cell):
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_cw_attack():
def test_cw_attack_ascend():
"""
Feature: CW-Attack test for ascend
Description: Given multiple images, we want to make sure the adversarial examples
generated are different from the images
Expectation: input_np != ms_adv_x
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
net = Net()
input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32)
label_np = np.array([3]).astype(np.int64)
num_classes = input_np.shape[1]
attack = CarliniWagnerL2Attack(net, num_classes, targeted=False)
adv_data = attack.generate(input_np, label_np)
assert np.any(input_np != adv_data)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_cw_attack_cpu():
"""
CW-Attack test
Feature: CW-Attack test for cpu
Description: Given multiple images, we want to make sure the adversarial examples
generated are different from the images
Expectation: input_np != ms_adv_x
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
net = Net()
input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32)
label_np = np.array([3]).astype(np.int64)
@@ -77,10 +100,35 @@ def test_cw_attack():
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_cw_attack_targeted():
def test_cw_attack_targeted_ascend():
"""
Feature: CW-Attack-Targeted test for ascend
Description: Given multiple images, we want to make sure the adversarial examples
generated are different from the images
Expectation: input_np != ms_adv_x
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
net = Net()
input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32)
target_np = np.array([1]).astype(np.int64)
num_classes = input_np.shape[1]
attack = CarliniWagnerL2Attack(net, num_classes, targeted=True)
adv_data = attack.generate(input_np, target_np)
assert np.any(input_np != adv_data)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_cw_attack_targeted_cpu():
"""
CW-Attack test
Feature: CW-Attack-Targeted test for cpu
Description: Given multiple images, we want to make sure the adversarial examples
generated are different from the images
Expectation: input_np != ms_adv_x
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
net = Net()
input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32)
target_np = np.array([1]).astype(np.int64)


tests/ut/python/adv_robustness/attacks/test_deep_fool.py (+127, -9)

@@ -24,8 +24,6 @@ from mindspore import Tensor

from mindarmour.adv_robustness.attacks import DeepFool

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")


# for user
class Net(Cell):
@@ -76,10 +74,42 @@ class Net2(Cell):
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_deepfool_attack():
def test_deepfool_attack_ascend():
"""
Deepfool-Attack test
Feature: Deepfool-Attack test for ascend
Description: Given multiple images, we want to make sure the adversarial examples
generated are different from the images
Expectation: input_np != ms_adv_x
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
net = Net()
input_shape = (1, 5)
_, classes = input_shape
input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32)
input_me = Tensor(input_np)
true_labels = np.argmax(net(input_me).asnumpy(), axis=1)
attack = DeepFool(net, classes, max_iters=10, norm_level=2,
bounds=(0.0, 1.0))
adv_data = attack.generate(input_np, true_labels)
# expected adv value
expect_value = np.asarray([[0.10300991, 0.20332647, 0.59308802, 0.59651263,
0.40406296]])
assert np.allclose(adv_data, expect_value), 'mindspore deepfool_method' \
' implementation error, ms_adv_x != expect_value'


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_deepfool_attack_cpu():
"""
Feature: Deepfool-Attack test for cpu
Description: Given multiple images, we want to make sure the adversarial examples
generated are different from the images
Expectation: input_np != ms_adv_x
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
net = Net()
input_shape = (1, 5)
_, classes = input_shape
@@ -101,10 +131,40 @@ def test_deepfool_attack():
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_deepfool_attack_detection():
def test_deepfool_attack_detection_ascend():
"""
Feature: Deepfool-Attack-Detection test for ascend
Description: Given multiple images, we want to make sure the adversarial examples
generated are different from the images
Expectation: input_np != ms_adv_x
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
net = Net2()
inputs1_np = np.random.random((2, 10, 10)).astype(np.float32)
inputs2_np = np.random.random((2, 10, 5)).astype(np.float32)
gt_boxes, gt_logits = net(Tensor(inputs1_np), Tensor(inputs2_np))
gt_boxes, gt_logits = gt_boxes.asnumpy(), gt_logits.asnumpy()
gt_labels = np.argmax(gt_logits, axis=2)
num_classes = 10

attack = DeepFool(net, num_classes, model_type='detection', reserve_ratio=0.3,
bounds=(0.0, 1.0))
adv_data = attack.generate((inputs1_np, inputs2_np), (gt_boxes, gt_labels))
assert np.any(adv_data != inputs1_np)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_deepfool_attack_detection_cpu():
"""
Deepfool-Attack test
Feature: Deepfool-Attack-Detection test for cpu
Description: Given multiple images, we want to make sure the adversarial examples
generated are different from the images
Expectation: input_np != ms_adv_x
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
net = Net2()
inputs1_np = np.random.random((2, 10, 10)).astype(np.float32)
inputs2_np = np.random.random((2, 10, 5)).astype(np.float32)
@@ -124,10 +184,38 @@ def test_deepfool_attack_detection():
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_deepfool_attack_inf():
def test_deepfool_attack_inf_ascend():
"""
Feature: Deepfool-Attack with inf-norm test for ascend
Description: Given multiple images, we want to make sure the adversarial examples
generated are different from the images
Expectation: input_np != ms_adv_x
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
net = Net()
input_shape = (1, 5)
_, classes = input_shape
input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32)
input_me = Tensor(input_np)
true_labels = np.argmax(net(input_me).asnumpy(), axis=1)
attack = DeepFool(net, classes, max_iters=10, norm_level=np.inf,
bounds=(0.0, 1.0))
adv_data = attack.generate(input_np, true_labels)
assert np.any(input_np != adv_data)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_deepfool_attack_inf_cpu():
"""
Deepfool-Attack test
Feature: Deepfool-Attack with inf-norm test for cpu
Description: Given multiple images, we want to make sure the adversarial examples
generated are different from the images
Expectation: input_np != ms_adv_x
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
net = Net()
input_shape = (1, 5)
_, classes = input_shape
@@ -145,7 +233,37 @@ def test_deepfool_attack_inf():
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_value_error():
def test_value_error_ascend():
"""
Feature: value error test for ascend
Description: value error for deep fool
Expectation: NotImplementedError is raised for an unsupported norm_level
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
net = Net()
input_shape = (1, 5)
_, classes = input_shape
input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32)
input_me = Tensor(input_np)
true_labels = np.argmax(net(input_me).asnumpy(), axis=1)
with pytest.raises(NotImplementedError):
# norm_level=1 is not available
attack = DeepFool(net, classes, max_iters=10, norm_level=1,
bounds=(0.0, 1.0))
assert attack.generate(input_np, true_labels)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_value_error_cpu():
"""
Feature: value error test for cpu
Description: value error for deep fool
Expectation: NotImplementedError is raised for an unsupported norm_level
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
net = Net()
input_shape = (1, 5)
_, classes = input_shape


tests/ut/python/adv_robustness/attacks/test_iterative_gradient_method.py (+178, -12)

@@ -29,7 +29,6 @@ from mindarmour.adv_robustness.attacks import IterativeGradientMethod
from mindarmour.adv_robustness.attacks import DiverseInputIterativeMethod
from mindarmour.adv_robustness.attacks import MomentumDiverseInputIterativeMethod

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")


# for user
@@ -61,10 +60,39 @@ class Net(Cell):
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_basic_iterative_method():
def test_basic_iterative_method_ascend():
"""
Basic iterative method unit test.
Feature: Basic iterative method unit test for ascend.
Description: Given multiple images, we want to make sure the adversarial examples
generated are different from the images
Expectation: input_np != ms_adv_x
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
input_np = np.asarray([[0.1, 0.2, 0.7]], np.float32)
label = np.asarray([2], np.int32)
label = np.eye(3)[label].astype(np.float32)

for i in range(5):
net = Net()
attack = BasicIterativeMethod(net, nb_iter=i + 1, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
ms_adv_x = attack.generate(input_np, label)
assert np.any(
ms_adv_x != input_np), 'Basic iterative method: generate value' \
' must not be equal to original value.'


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_basic_iterative_method_cpu():
"""
Feature: Basic iterative method unit test for cpu.
Description: Given multiple images, we want to make sure the adversarial examples
generated are different from the images
Expectation: input_np != ms_adv_x
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
input_np = np.asarray([[0.1, 0.2, 0.7]], np.float32)
label = np.asarray([2], np.int32)
label = np.eye(3)[label].astype(np.float32)
@@ -83,10 +111,38 @@ def test_basic_iterative_method():
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_momentum_iterative_method():
def test_momentum_iterative_method_ascend():
"""
Momentum iterative method unit test.
Feature: Momentum iterative method unit test for ascend
Description: Given multiple images, we want to make sure the adversarial examples
generated are different from the images
Expectation: input_np != ms_adv_x
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
input_np = np.asarray([[0.1, 0.2, 0.7]], np.float32)
label = np.asarray([2], np.int32)
label = np.eye(3)[label].astype(np.float32)

for i in range(5):
attack = MomentumIterativeMethod(Net(), nb_iter=i + 1, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
ms_adv_x = attack.generate(input_np, label)
assert np.any(ms_adv_x != input_np), 'Momentum iterative method: generate' \
' value must not be equal to' \
' original value.'


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_momentum_iterative_method_cpu():
"""
Feature: Momentum iterative method unit test for cpu
Description: Given multiple images, we want to make sure the adversarial examples
generated are different from the images
Expectation: input_np != ms_adv_x
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
input_np = np.asarray([[0.1, 0.2, 0.7]], np.float32)
label = np.asarray([2], np.int32)
label = np.eye(3)[label].astype(np.float32)
@@ -104,10 +160,40 @@ def test_momentum_iterative_method():
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_projected_gradient_descent_method():
def test_projected_gradient_descent_method_ascend():
"""
Feature: Projected gradient descent method unit test for ascend
Description: Given multiple images, we want to make sure the adversarial examples
generated are different from the images
Expectation: input_np != ms_adv_x
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
input_np = np.asarray([[0.1, 0.2, 0.7]], np.float32)
label = np.asarray([2], np.int32)
label = np.eye(3)[label].astype(np.float32)

for i in range(5):
attack = ProjectedGradientDescent(Net(), nb_iter=i + 1, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
ms_adv_x = attack.generate(input_np, label)

assert np.any(
ms_adv_x != input_np), 'Projected gradient descent method: ' \
'generate value must not be equal to' \
' original value.'


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_projected_gradient_descent_method_cpu():
"""
Projected gradient descent method unit test.
Feature: Projected gradient descent method unit test for cpu
Description: Given multiple images, we want to make sure the adversarial examples
generated are different from the images
Expectation: input_np != ms_adv_x
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
input_np = np.asarray([[0.1, 0.2, 0.7]], np.float32)
label = np.asarray([2], np.int32)
label = np.eye(3)[label].astype(np.float32)
@@ -127,10 +213,37 @@ def test_projected_gradient_descent_method():
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_diverse_input_iterative_method():
def test_diverse_input_iterative_method_ascend():
"""
Feature: Diverse input iterative method unit test for ascend
Description: Given multiple images, we want to make sure the adversarial examples
generated are different from the images
Expectation: input_np != ms_adv_x
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
input_np = np.asarray([[0.1, 0.2, 0.7]], np.float32)
label = np.asarray([2], np.int32)
label = np.eye(3)[label].astype(np.float32)

attack = DiverseInputIterativeMethod(Net(), loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
ms_adv_x = attack.generate(input_np, label)
assert np.any(ms_adv_x != input_np), 'Diverse input iterative method: generate' \
' value must not be equal to' \
' original value.'


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_diverse_input_iterative_method_cpu():
"""
Diverse input iterative method unit test.
Feature: Diverse input iterative method unit test for cpu
Description: Given multiple images, we want to make sure the adversarial examples
generated are different from the images
Expectation: input_np != ms_adv_x
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
input_np = np.asarray([[0.1, 0.2, 0.7]], np.float32)
label = np.asarray([2], np.int32)
label = np.eye(3)[label].astype(np.float32)
@@ -147,10 +260,38 @@ def test_diverse_input_iterative_method():
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_momentum_diverse_input_iterative_method():
def test_momentum_diverse_input_iterative_method_ascend():
"""
Momentum diverse input iterative method unit test.
Feature: Momentum diverse input iterative method unit test for ascend
Description: Given multiple images, we want to make sure the adversarial examples
generated are different from the images
Expectation: input_np != ms_adv_x
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
input_np = np.asarray([[0.1, 0.2, 0.7]], np.float32)
label = np.asarray([2], np.int32)
label = np.eye(3)[label].astype(np.float32)

attack = MomentumDiverseInputIterativeMethod(Net(), loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
ms_adv_x = attack.generate(input_np, label)
assert np.any(ms_adv_x != input_np), 'Momentum diverse input iterative method: ' \
'generate value must not be equal to' \
' original value.'


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_momentum_diverse_input_iterative_method_cpu():
"""
Feature: Momentum diverse input iterative method unit test for cpu
Description: Given multiple images, we want to make sure the adversarial examples
generated are different from the images
Expectation: input_np != ms_adv_x

"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
input_np = np.asarray([[0.1, 0.2, 0.7]], np.float32)
label = np.asarray([2], np.int32)
label = np.eye(3)[label].astype(np.float32)
@@ -167,7 +308,32 @@ def test_momentum_diverse_input_iterative_method():
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_error():
def test_error_ascend():
"""
Feature: test error for ascend
Description: test error
Expectation: error detected or attack.generate works properly
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
attack = IterativeGradientMethod(Net(), bounds=(0.0, 1.0), loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
with pytest.raises(NotImplementedError):
input_np = np.asarray([[0.1, 0.2, 0.7]], np.float32)
label = np.asarray([2], np.int32)
label = np.eye(3)[label].astype(np.float32)
assert attack.generate(input_np, label)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_error_cpu():
"""
Feature: test error for cpu
Description: test error
Expectation: error detected or attack.generate works properly
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
attack = IterativeGradientMethod(Net(), bounds=(0.0, 1.0), loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
with pytest.raises(NotImplementedError):
input_np = np.asarray([[0.1, 0.2, 0.7]], np.float32)


tests/ut/python/adv_robustness/attacks/test_lbfgs.py (+43, -3)

@@ -26,7 +26,6 @@ from mindarmour.utils.logger import LogUtil

from tests.ut.python.utils.mock_net import Net

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")


LOGGER = LogUtil.get_instance()
@@ -39,10 +38,51 @@ LOGGER.set_level('DEBUG')
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_lbfgs_attack():
def test_lbfgs_attack_ascend():
"""
LBFGS-Attack test
Feature: LBFGS-Attack test for ascend
Description: make sure that attack.generate works properly
Expectation: attack.generate works properly
"""

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
np.random.seed(123)
# upload trained network
current_dir = os.path.dirname(os.path.abspath(__file__))
ckpt_path = os.path.join(current_dir,
'../../dataset/trained_ckpt_file/checkpoint_lenet-10_1875.ckpt')
net = Net()
load_dict = load_checkpoint(ckpt_path)
load_param_into_net(net, load_dict)

# get one mnist image
input_np = np.load(os.path.join(current_dir,
'../../dataset/test_images.npy'))[:1]
label_np = np.load(os.path.join(current_dir,
'../../dataset/test_labels.npy'))[:1]
LOGGER.debug(TAG, 'true label is :{}'.format(label_np[0]))
classes = 10
target_np = np.random.randint(0, classes, 1)
while target_np == label_np[0]:
target_np = np.random.randint(0, classes)
target_np = np.eye(10)[target_np].astype(np.float32)

attack = LBFGS(net, is_targeted=True)
LOGGER.debug(TAG, 'target_np is :{}'.format(target_np[0]))
_ = attack.generate(input_np, target_np)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_lbfgs_attack_cpu():
"""
Feature: LBFGS-Attack test for cpu
Description: make sure that attack.generate works properly
Expectation: attack.generate works properly
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
np.random.seed(123)
# upload trained network
current_dir = os.path.dirname(os.path.abspath(__file__))


tests/ut/python/adv_robustness/detectors/black/test_similarity_detector.py (+52, -4)

@@ -24,8 +24,6 @@ from mindspore.ops.operations import Add
from mindarmour.adv_robustness.detectors import SimilarityDetector
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
class EncoderNet(Cell):
"""
@@ -62,10 +60,60 @@ class EncoderNet(Cell):
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_similarity_detector():
def test_similarity_detector_ascend():
"""
Feature: Similarity detector unit test for ascend
Description: make sure the similarity detector works as expected
Expectation: detected_res == expected_value
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
# Prepare dataset
np.random.seed(5)
x_train = np.random.rand(1000, 32, 32, 3).astype(np.float32)
perm = np.random.permutation(x_train.shape[0])
# Generate query sequences
benign_queries = x_train[perm[:1000], :, :, :]
suspicious_queries = x_train[perm[-1], :, :, :] + np.random.normal(
0, 0.05, (1000,) + x_train.shape[1:])
suspicious_queries = suspicious_queries.astype(np.float32)
# explicit threshold not provided, calculate threshold for K
encoder = Model(EncoderNet(encode_dim=256))
detector = SimilarityDetector(max_k_neighbor=50, trans_model=encoder)
num_nearest_neighbors, thresholds = detector.fit(inputs=x_train)
detector.set_threshold(num_nearest_neighbors[-1], thresholds[-1])
detector.detect(benign_queries)
detections = detector.get_detection_interval()
# compare
expected_value = 0
assert len(detections) == expected_value
detector.clear_buffer()
detector.detect(suspicious_queries)
# compare
expected_value = [1051, 1102, 1153, 1204, 1255,
1306, 1357, 1408, 1459, 1510,
1561, 1612, 1663, 1714, 1765,
1816, 1867, 1918, 1969]
assert np.all(detector.get_detected_queries() == expected_value)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_similarity_detector_cpu():
"""
Similarity detector unit test
Feature: Similarity detector unit test for cpu
Description: make sure the similarity detector works as expected
Expectation: detected_res == expected_value
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
# Prepare dataset
np.random.seed(5)
x_train = np.random.rand(1000, 32, 32, 3).astype(np.float32)


tests/ut/python/adv_robustness/detectors/test_ensemble_detector.py (+66, -4)

@@ -26,7 +26,6 @@ from mindarmour.adv_robustness.detectors import ErrorBasedDetector
from mindarmour.adv_robustness.detectors import RegionBasedDetector
from mindarmour.adv_robustness.detectors import EnsembleDetector

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")


class Net(Cell):
@@ -70,10 +69,41 @@ class AutoNet(Cell):
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_ensemble_detector():
def test_ensemble_detector_ascend():
"""
Compute mindspore result.
Feature: Compute mindspore result for ascend
Description: make sure the ensemble detector works as expected
Expectation: detected_res == expected_value
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
np.random.seed(6)
adv = np.random.rand(4, 4).astype(np.float32)
model = Model(Net())
auto_encoder = Model(AutoNet())
random_label = np.random.randint(10, size=4)
labels = np.eye(10)[random_label]
magnet_detector = ErrorBasedDetector(auto_encoder)
region_detector = RegionBasedDetector(model)
# use this to enable radius in region_detector
region_detector.fit(adv, labels)
detectors = [magnet_detector, region_detector]
detector = EnsembleDetector(detectors)
detected_res = detector.detect(adv)
expected_value = np.array([0, 1, 0, 0])
assert np.all(detected_res == expected_value)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_ensemble_detector_cpu():
"""
Feature: Compute mindspore result for cpu
Description: make sure the ensemble detector works as expected
Expectation: detected_res == expected_value
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
np.random.seed(6)
adv = np.random.rand(4, 4).astype(np.float32)
model = Model(Net())
@@ -96,7 +126,39 @@ def test_ensemble_detector():
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_error():
def test_error_ascend():
"""
Feature: test error for ascend
Description: test error
Expectation: error detected or detector.fit works properly
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
np.random.seed(6)
adv = np.random.rand(4, 4).astype(np.float32)
model = Model(Net())
auto_encoder = Model(AutoNet())
random_label = np.random.randint(10, size=4)
labels = np.eye(10)[random_label]
magnet_detector = ErrorBasedDetector(auto_encoder)
region_detector = RegionBasedDetector(model)
# use this to enable radius in region_detector
detectors = [magnet_detector, region_detector]
detector = EnsembleDetector(detectors)
with pytest.raises(NotImplementedError):
assert detector.fit(adv, labels)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_error_cpu():
"""
Feature: test error for cpu
Description: test error
Expectation: error detected or detector.fit works properly
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
np.random.seed(6)
adv = np.random.rand(4, 4).astype(np.float32)
model = Model(Net())


tests/ut/python/adv_robustness/detectors/test_mag_net.py (+134, -10)

@@ -26,7 +26,6 @@ from mindspore import context
from mindarmour.adv_robustness.detectors import ErrorBasedDetector
from mindarmour.adv_robustness.detectors import DivergenceBasedDetector

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")


class Net(Cell):
@@ -75,10 +74,36 @@ class PredNet(Cell):
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_mag_net():
def test_mag_net_ascend():
"""
Compute mindspore result.
Feature: Compute mindspore result for ascend
Description: make sure the error-based detector works as expected
Expectation: detected_res == expected_value
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
np.random.seed(5)
ori = np.random.rand(4, 4, 4).astype(np.float32)
np.random.seed(6)
adv = np.random.rand(4, 4, 4).astype(np.float32)
model = Model(Net())
detector = ErrorBasedDetector(model)
detector.fit(ori)
detected_res = detector.detect(adv)
expected_value = np.array([1, 1, 1, 1])
assert np.all(detected_res == expected_value)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_mag_net_cpu():
"""
Feature: Compute mindspore result for cpu
Description: make sure the error-based detector works as expected
Expectation: detected_res == expected_value
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
np.random.seed(5)
ori = np.random.rand(4, 4, 4).astype(np.float32)
np.random.seed(6)
@@ -96,10 +121,32 @@ def test_mag_net():
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_mag_net_transform():
def test_mag_net_transform_ascend():
"""
Feature: Compute mindspore result for ascend
Description: make sure the transform function works properly
Expectation: adv_trans != adv
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
np.random.seed(6)
adv = np.random.rand(4, 4, 4).astype(np.float32)
model = Model(Net())
detector = ErrorBasedDetector(model)
adv_trans = detector.transform(adv)
assert np.any(adv_trans != adv)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_mag_net_transform_cpu():
"""
Feature: Compute mindspore result for cpu
Description: make sure the transform function works properly
Expectation: adv_trans != adv
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
np.random.seed(6)
adv = np.random.rand(4, 4, 4).astype(np.float32)
model = Model(Net())
@@ -113,10 +160,38 @@ def test_mag_net_transform():
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_mag_net_divergence():
def test_mag_net_divergence_ascend():
"""
Compute mindspore result.
Feature: Compute mindspore result for ascend
Description: make sure the divergence-based detector works as expected
Expectation: detected_res == expected_value
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
np.random.seed(5)
ori = np.random.rand(4, 4, 4).astype(np.float32)
np.random.seed(6)
adv = np.random.rand(4, 4, 4).astype(np.float32)
encoder = Model(Net())
model = Model(PredNet())
detector = DivergenceBasedDetector(encoder, model)
threshold = detector.fit(ori)
detector.set_threshold(threshold)
detected_res = detector.detect(adv)
expected_value = np.array([1, 0, 1, 1])
assert np.all(detected_res == expected_value)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_mag_net_divergence_cpu():
"""
Feature: Compute mindspore result for cpu
Description: make sure the divergence-based detector works as expected
Expectation: detected_res == expected_value
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
np.random.seed(5)
ori = np.random.rand(4, 4, 4).astype(np.float32)
np.random.seed(6)
@@ -136,10 +211,33 @@ def test_mag_net_divergence():
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_mag_net_divergence_transform():
def test_mag_net_divergence_transform_ascend():
"""
Compute mindspore result.
Feature: Compute mindspore result for ascend
Description: make sure the transform function works properly
Expectation: adv_trans != adv
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
np.random.seed(6)
adv = np.random.rand(4, 4, 4).astype(np.float32)
encoder = Model(Net())
model = Model(PredNet())
detector = DivergenceBasedDetector(encoder, model)
adv_trans = detector.transform(adv)
assert np.any(adv_trans != adv)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_mag_net_divergence_transform_cpu():
"""
Feature: Compute mindspore result for cpu
Description: make sure the transform function works properly
Expectation: adv_trans != adv
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
np.random.seed(6)
adv = np.random.rand(4, 4, 4).astype(np.float32)
encoder = Model(Net())
@@ -154,7 +252,33 @@ def test_mag_net_divergence_transform():
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_value_error():
def test_value_error_ascend():
"""
Feature: test error for ascend
Description: test error
Expectation: error detected or attack.generate works properly
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
np.random.seed(6)
adv = np.random.rand(4, 4, 4).astype(np.float32)
encoder = Model(Net())
model = Model(PredNet())
detector = DivergenceBasedDetector(encoder, model, option='bad_op')
with pytest.raises(NotImplementedError):
assert detector.detect_diff(adv)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_value_error_cpu():
"""
Feature: test error for cpu
Description: test error
Expectation: error detected or attack.generate works properly
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
np.random.seed(6)
adv = np.random.rand(4, 4, 4).astype(np.float32)
encoder = Model(Net())


+ 87
- 5
tests/ut/python/adv_robustness/detectors/test_region_based_detector.py View File

@@ -25,8 +25,6 @@ from mindspore.ops.operations import Add
from mindarmour.adv_robustness.detectors import RegionBasedDetector


context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")


class Net(Cell):
"""
@@ -51,10 +49,39 @@ class Net(Cell):
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_region_based_classification():
def test_region_based_classification_ascend():
"""
Compute mindspore result.
Feature: Compute mindspore result for ascend
Description: make sure the region-based detector works as expected
Expectation: detected_res == expected_value
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
np.random.seed(5)
ori = np.random.rand(4, 4).astype(np.float32)
labels = np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0],
[0, 1, 0, 0]]).astype(np.int32)
np.random.seed(6)
adv = np.random.rand(4, 4).astype(np.float32)
model = Model(Net())
detector = RegionBasedDetector(model)
radius = detector.fit(ori, labels)
detector.set_radius(radius)
detected_res = detector.detect(adv)
expected_value = np.array([0, 0, 1, 0])
assert np.all(detected_res == expected_value)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_region_based_classification_cpu():
"""
Feature: Compute mindspore result for cpu
Description: make sure the region-based detector works as expected
Expectation: detected_res == expected_value
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
np.random.seed(5)
ori = np.random.rand(4, 4).astype(np.float32)
labels = np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0],
@@ -75,7 +102,62 @@ def test_region_based_classification():
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_value_error():
def test_value_error_ascend():
"""
Feature: test error for ascend
Description: test error
Expectation: error detected or attack.generate works properly
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
np.random.seed(5)
ori = np.random.rand(4, 4).astype(np.float32)
labels = np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0],
[0, 1, 0, 0]]).astype(np.int32)
np.random.seed(6)
adv = np.random.rand(4, 4).astype(np.float32)
model = Model(Net())
# model should be mindspore model
with pytest.raises(TypeError):
assert RegionBasedDetector(Net())

with pytest.raises(ValueError):
assert RegionBasedDetector(model, number_points=-1)

with pytest.raises(ValueError):
assert RegionBasedDetector(model, initial_radius=-1)

with pytest.raises(ValueError):
assert RegionBasedDetector(model, max_radius=-2.2)

with pytest.raises(ValueError):
assert RegionBasedDetector(model, search_step=0)

with pytest.raises(TypeError):
assert RegionBasedDetector(model, sparse='False')

detector = RegionBasedDetector(model)
with pytest.raises(TypeError):
# radius must not be empty
assert detector.detect(adv)

radius = detector.fit(ori, labels)
detector.set_radius(radius)
with pytest.raises(TypeError):
# adv type should be in (list, tuple, numpy.ndarray)
assert detector.detect(adv.tostring())
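
Taken together, the failure cases above outline how a valid RegionBasedDetector is built and used. A minimal sketch follows; the keyword names come from the checks above, while the values are illustrative assumptions, and ori, labels, adv and Net are reused from this test:

# Illustrative values only: negative radii/point counts and a zero search step
# raise ValueError, and sparse must be a real bool (TypeError otherwise).
detector = RegionBasedDetector(Model(Net()),
                               number_points=10,
                               initial_radius=0.0,
                               max_radius=1.0,
                               search_step=0.01,
                               sparse=False)
radius = detector.fit(ori, labels)  # fit first, otherwise detect() has no radius
detector.set_radius(radius)
result = detector.detect(adv)       # adv must be a list, tuple or numpy.ndarray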


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_value_error_cpu():
"""
Feature: test error for cpu
Description: test error
Expectation: error detected or attack.generate works properly
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
np.random.seed(5)
ori = np.random.rand(4, 4).astype(np.float32)
labels = np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0],


+ 81
- 5
tests/ut/python/adv_robustness/detectors/test_spatial_smoothing.py View File

@@ -24,7 +24,6 @@ from mindspore import context

from mindarmour.adv_robustness.detectors import SpatialSmoothing

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")


# for use
@@ -51,10 +50,44 @@ class Net(Cell):
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_spatial_smoothing():
def test_spatial_smoothing_ascend():
"""
Compute mindspore result.
Feature: Compute mindspore result for ascend
Description: make sure the spatial smoothing detector works properly
Expectation: detected_res == expected_value
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
input_shape = (50, 3)

np.random.seed(1)
input_np = np.random.randn(*input_shape).astype(np.float32)

np.random.seed(2)
adv_np = np.random.randn(*input_shape).astype(np.float32)

# mock user model
model = Model(Net())
detector = SpatialSmoothing(model)
# Training
threshold = detector.fit(input_np)
detector.set_threshold(threshold.item())
detected_res = np.array(detector.detect(adv_np))
idx = np.where(detected_res > 0)
expected_value = np.array([10, 39, 48])
assert np.all(idx == expected_value)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_spatial_smoothing_cpu():
"""
Feature: Compute mindspore result for cpu
Description: make sure the spatial smoothing detector works properly
Expectation: detected_res == expected_value
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
input_shape = (50, 3)

np.random.seed(1)
@@ -80,10 +113,53 @@ def test_spatial_smoothing():
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_spatial_smoothing_diff():
def test_spatial_smoothing_diff_ascend():
"""
Feature: Compute mindspore result for ascend
Description: make sure the detect diff function works properly
Expectation: diffs == expected_value
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
input_shape = (50, 3)
np.random.seed(1)
input_np = np.random.randn(*input_shape).astype(np.float32)

np.random.seed(2)
adv_np = np.random.randn(*input_shape).astype(np.float32)

# mock user model
model = Model(Net())
detector = SpatialSmoothing(model)
# Training
detector.fit(input_np)
diffs = detector.detect_diff(adv_np)
expected_value = np.array([0.20959496, 0.69537306, 0.13034256, 0.7421039,
0.41419053, 0.56346416, 0.4277994, 0.3240941,
0.048190027, 0.6806958, 1.1405756, 0.587804,
0.40533313, 0.2875523, 0.36801508, 0.61993587,
0.49286827, 0.13222921, 0.68012404, 0.4164942,
0.25758877, 0.6008735, 0.60623455, 0.34981924,
0.3945489, 0.879787, 0.3934811, 0.23387678,
0.63480926, 0.56435543, 0.16067612, 0.57489645,
0.21772699, 0.55924356, 0.5186635, 0.7094835,
0.0613693, 0.13305652, 0.11505881, 1.2404268,
0.50948, 0.15797901, 0.44473758, 0.2495422,
0.38254014, 0.543059, 0.06452079, 0.36902517,
1.1845329, 0.3870097])
assert np.allclose(diffs, expected_value, 0.0001, 0.0001)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_spatial_smoothing_diff_cpu():
"""
Feature: Compute mindspore result for cpu
Description: make sure the detect diff function works properly
Expectation: diffs == expected_value
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
input_shape = (50, 3)
np.random.seed(1)
input_np = np.random.randn(*input_shape).astype(np.float32)
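
As a reading aid, here is a small sketch of how the fitted threshold and detect_diff presumably relate for SpatialSmoothing; this is an interpretation of the calls used above, not a claim about the implementation:

# Uses only calls shown in the tests above (fit, set_threshold, detect, detect_diff).
detector = SpatialSmoothing(Model(Net()))
threshold = detector.fit(input_np)
detector.set_threshold(threshold.item())
diffs = np.array(detector.detect_diff(adv_np))
flags = np.array(detector.detect(adv_np))
# Assumption: detect() flags roughly the samples whose distance exceeds the threshold.
print(np.where(flags > 0)[0], np.where(diffs > threshold.item())[0])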


+ 8
- 7
tests/ut/python/reliability/concept_drift/test_concept_drift_images.py View File

@@ -19,13 +19,11 @@ Concept drift test for images.
import logging
import pytest
import numpy as np
from mindspore import Tensor
from mindspore.train.model import Model
from mindarmour.utils.logger import LogUtil
from mindspore import Model, nn, context
from examples.common.networks.lenet5.lenet5_net_for_fuzzing import LeNet5
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindarmour.reliability.concept_drift.concept_drift_check_images import OodDetectorFeatureCluster
from mindspore import Model, context
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from examples.common.networks.lenet5.lenet5_net_for_fuzzing import LeNet5

LOGGER = LogUtil.get_instance()
TAG = 'Concept_Test'
@@ -37,11 +35,14 @@ TAG = 'Concept_Test'
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_cp():
def test_concept_drift_image_ascend():
"""
Concept drift test
Feature: test concept drift with images
Description: make sure the OOD detector works properly
Expectation: np.any(result >= 0.0)
"""
# load model
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
ckpt_path = '../../dataset/trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
net = LeNet5()
load_dict = load_checkpoint(ckpt_path)
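
The rest of this test is cut off by the diff; the usual MindSpore step after load_checkpoint is to load the parameters into the network and wrap it in a Model (the OodDetectorFeatureCluster setup that presumably follows is not reproduced here):

# Standard checkpoint-loading pattern; everything after this point is omitted
# from the diff and therefore not reconstructed.
load_param_into_net(net, load_dict)
model = Model(net)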


+ 6
- 2
tests/ut/python/reliability/concept_drift/test_concept_drift_time_series.py View File

@@ -20,6 +20,7 @@ Concept drift test.
import logging
import pytest
import numpy as np
from mindspore import context
from mindarmour import ConceptDriftCheckTimeSeries
from mindarmour.utils.logger import LogUtil

@@ -32,10 +33,13 @@ TAG = 'Concept_Test'
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_cp():
def test_concept_drift_time_series_ascend():
"""
Concept drift test.
Feature: test concept drift with time series data
Description: make sure the OOD detector works properly
Expectation: np.any(result >= 0.0)
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
# create data
data = 5*np.random.rand(1000)
data[200: 800] = 50*np.random.rand(600)

