You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-'), and can be up to 35 characters long.

eval.py 4.1 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111
  1. # Copyright 2020 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """cnnctc eval"""
  16. import time
  17. import numpy as np
  18. from mindspore import Tensor, context
  19. import mindspore.common.dtype as mstype
  20. from mindspore.train.serialization import load_checkpoint, load_param_into_net
  21. from mindspore.dataset import GeneratorDataset
  22. from src.util import CTCLabelConverter, AverageMeter
  23. from src.dataset import iiit_generator_batch, adv_iiit_generator_batch
  24. from src.cnn_ctc import CNNCTC
  25. from src.model_utils.config import config
  26. from src.model_utils.moxing_adapter import moxing_wrapper
# Run in graph mode; do not dump computation graphs (save_graphs_path is
# still set so any later opt-in dump lands in the working directory).
context.set_context(mode=context.GRAPH_MODE, save_graphs=False, save_graphs_path=".")
  28. def test_dataset_creator(is_adv=False):
  29. if is_adv:
  30. ds = GeneratorDataset(adv_iiit_generator_batch(), ['img', 'label_indices', 'text',
  31. 'sequence_length', 'label_str'])
  32. else:
  33. ds = GeneratorDataset(iiit_generator_batch, ['img', 'label_indices', 'text',
  34. 'sequence_length', 'label_str'])
  35. return ds
@moxing_wrapper(pre_process=None)
def test():
    """Eval cnn-ctc model.

    Loads a CNN-CTC checkpoint, runs it over the (optionally adversarial)
    IIIT evaluation dataset, reports per-stage timing averages, sample
    predictions, and the final exact-match accuracy.
    """
    # Select the execution backend from the config (e.g. Ascend / GPU / CPU).
    target = config.device_target
    context.set_context(device_target=target)
    ds = test_dataset_creator(is_adv=config.IS_ADV)
    net = CNNCTC(config.NUM_CLASS, config.HIDDEN_SIZE, config.FINAL_FEATURE_WIDTH)
    ckpt_path = config.CHECKPOINT_PATH
    param_dict = load_checkpoint(ckpt_path)
    load_param_into_net(net, param_dict)
    print('parameters loaded! from: ', ckpt_path)
    converter = CTCLabelConverter(config.CHARACTER)
    # Running averages for the three measured stages of each batch.
    model_run_time = AverageMeter()
    npu_to_cpu_time = AverageMeter()
    postprocess_time = AverageMeter()
    count = 0
    correct_count = 0
    for data in ds.create_tuple_iterator():
        # Columns: img, label_indices (unused), text, sequence_length (unused), label_str.
        img, _, text, _, length = data
        img_tensor = Tensor(img, mstype.float32)
        # Stage 1: forward pass on device.
        model_run_begin = time.time()
        model_predict = net(img_tensor)
        model_run_end = time.time()
        model_run_time.update(model_run_end - model_run_begin)
        # Stage 2: device -> host transfer (asnumpy) plus squeeze.
        npu_to_cpu_begin = time.time()
        model_predict = np.squeeze(model_predict.asnumpy())
        npu_to_cpu_end = time.time()
        npu_to_cpu_time.update(npu_to_cpu_end - npu_to_cpu_begin)
        # Stage 3: greedy CTC decode on host.
        postprocess_begin = time.time()
        preds_size = np.array([model_predict.shape[1]] * config.TEST_BATCH_SIZE)
        preds_index = np.argmax(model_predict, 2)
        preds_index = np.reshape(preds_index, [-1])
        preds_str = converter.decode(preds_index, preds_size)
        postprocess_end = time.time()
        postprocess_time.update(postprocess_end - postprocess_begin)
        label_str = converter.reverse_encode(text.asnumpy(), length.asnumpy())
        if count == 0:
            # Discard first-batch timings: they include graph compilation /
            # warm-up cost and would skew the averages.
            model_run_time.reset()
            npu_to_cpu_time.reset()
            postprocess_time.reset()
        else:
            print('---------model run time--------', model_run_time.avg)
            print('---------npu_to_cpu run time--------', npu_to_cpu_time.avg)
            print('---------postprocess run time--------', postprocess_time.avg)
        print("Prediction samples: \n", preds_str[:5])
        print("Ground truth: \n", label_str[:5])
        for pred, label in zip(preds_str, label_str):
            if pred == label:
                correct_count += 1
            # NOTE(review): indentation was lost in the source view; `count`
            # is reconstructed as a per-sample counter (inside the zip loop),
            # matching the accuracy denominator below — confirm against the
            # upstream repository.
            count += 1
        print(count)
    # Exact-match accuracy over all decoded samples. Raises ZeroDivisionError
    # if the dataset yields no batches.
    print('accuracy: ', correct_count / count)
# Script entry point: run evaluation when executed directly.
if __name__ == '__main__':
    test()

MindArmour关注AI的安全和隐私问题。致力于增强模型的安全可信、保护用户的数据隐私。主要包含3个模块:对抗样本鲁棒性模块、Fuzz Testing模块、隐私保护与评估模块。 对抗样本鲁棒性模块 对抗样本鲁棒性模块用于评估模型对于对抗样本的鲁棒性,并提供模型增强方法用于增强模型抗对抗样本攻击的能力,提升模型鲁棒性。对抗样本鲁棒性模块包含了4个子模块:对抗样本的生成、对抗样本的检测、模型防御、攻防评估。