From eb0881d3d419bae1fc806dd22cdc6ed208abda99 Mon Sep 17 00:00:00 2001 From: ZhidanLiu Date: Thu, 10 Mar 2022 18:31:58 +0800 Subject: [PATCH] add demo of ocr evaluation --- .../natural_robustness/ocr_evaluate/__init__.py | 0 .../natural_robustness/ocr_evaluate/analyse.py | 126 +++++ .../ocr_evaluate/cnn_ctc/README.md | 591 +++++++++++++++++++++ .../ocr_evaluate/cnn_ctc/README_CN.md | 523 ++++++++++++++++++ .../ocr_evaluate/cnn_ctc/eval.py | 111 ++++ .../ocr_evaluate/cnn_ctc/export.py | 51 ++ .../ocr_evaluate/cnn_ctc/mindspore_hub_conf.py | 30 ++ .../ocr_evaluate/cnn_ctc/postprocess.py | 54 ++ .../ocr_evaluate/cnn_ctc/preprocess.py | 96 ++++ .../ocr_evaluate/cnn_ctc/requirements.txt | 7 + .../cnn_ctc/scripts/run_eval_ascend.sh | 50 ++ .../ocr_evaluate/cnn_ctc/scripts/run_eval_gpu.sh | 50 ++ .../cnn_ctc/scripts/run_standalone_train_ascend.sh | 49 ++ .../cnn_ctc/scripts/run_standalone_train_gpu.sh | 42 ++ .../ocr_evaluate/cnn_ctc/src/__init__.py | 15 + .../ocr_evaluate/cnn_ctc/src/callback.py | 73 +++ .../ocr_evaluate/cnn_ctc/src/cnn_ctc.py | 389 ++++++++++++++ .../ocr_evaluate/cnn_ctc/src/dataset.py | 343 ++++++++++++ .../ocr_evaluate/cnn_ctc/src/lr_schedule.py | 41 ++ .../cnn_ctc/src/model_utils/__init__.py | 0 .../ocr_evaluate/cnn_ctc/src/model_utils/config.py | 131 +++++ .../cnn_ctc/src/model_utils/device_adapter.py | 26 + .../cnn_ctc/src/model_utils/local_adapter.py | 36 ++ .../cnn_ctc/src/model_utils/moxing_adapter.py | 124 +++++ .../ocr_evaluate/cnn_ctc/src/preprocess_dataset.py | 172 ++++++ .../ocr_evaluate/cnn_ctc/src/util.py | 102 ++++ .../ocr_evaluate/cnn_ctc/train.py | 148 ++++++ .../ocr_evaluate/default_config.yaml | 76 +++ .../ocr_evaluate/eval_and_save.py | 100 ++++ .../ocr_evaluate/generate_adv_samples.py | 139 +++++ .../ocr_evaluate/image/catalog.png | Bin 0 -> 14419 bytes .../ocr_evaluate/image/name_format.png | Bin 0 -> 8025 bytes .../ocr_evaluate/image/result_demo.png | Bin 0 -> 45458 bytes .../对OCR模型CNN-CTC的鲁棒性评测.md | 508 ++++++++++++++++++ 34 files changed, 4203 insertions(+) create mode 100644 examples/natural_robustness/ocr_evaluate/__init__.py create mode 100644 examples/natural_robustness/ocr_evaluate/analyse.py create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/README.md create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/README_CN.md create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/eval.py create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/export.py create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/mindspore_hub_conf.py create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/postprocess.py create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/preprocess.py create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/requirements.txt create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/scripts/run_eval_ascend.sh create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/scripts/run_eval_gpu.sh create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/scripts/run_standalone_train_ascend.sh create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/scripts/run_standalone_train_gpu.sh create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/src/__init__.py create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/src/callback.py create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/src/cnn_ctc.py create mode 100644 
examples/natural_robustness/ocr_evaluate/cnn_ctc/src/dataset.py create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/src/lr_schedule.py create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/src/model_utils/__init__.py create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/src/model_utils/config.py create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/src/model_utils/device_adapter.py create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/src/model_utils/local_adapter.py create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/src/model_utils/moxing_adapter.py create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/src/preprocess_dataset.py create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/src/util.py create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/train.py create mode 100644 examples/natural_robustness/ocr_evaluate/default_config.yaml create mode 100644 examples/natural_robustness/ocr_evaluate/eval_and_save.py create mode 100644 examples/natural_robustness/ocr_evaluate/generate_adv_samples.py create mode 100644 examples/natural_robustness/ocr_evaluate/image/catalog.png create mode 100644 examples/natural_robustness/ocr_evaluate/image/name_format.png create mode 100644 examples/natural_robustness/ocr_evaluate/image/result_demo.png create mode 100644 examples/natural_robustness/ocr_evaluate/对OCR模型CNN-CTC的鲁棒性评测.md diff --git a/examples/natural_robustness/ocr_evaluate/__init__.py b/examples/natural_robustness/ocr_evaluate/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/examples/natural_robustness/ocr_evaluate/analyse.py b/examples/natural_robustness/ocr_evaluate/analyse.py new file mode 100644 index 0000000..9d9464d --- /dev/null +++ b/examples/natural_robustness/ocr_evaluate/analyse.py @@ -0,0 +1,126 @@ +# Copyright 2022 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Analyse result of ocr evaluation.""" + +import os +import sys +import json +from collections import defaultdict +from io import BytesIO +import lmdb +from PIL import Image + +from cnn_ctc.src.model_utils.config import config + + +def analyse_adv_iii5t_3000(lmdb_path): + """Analyse result of ocr evaluation.""" + env = lmdb.open(lmdb_path, max_readers=32, readonly=True, lock=False, readahead=False, meminit=False) + + if not env: + print('cannot create lmdb from %s' % (lmdb_path)) + sys.exit(0) + + with env.begin(write=False) as txn: + n_samples = int(txn.get('num-samples'.encode())) + print(n_samples) + n_samples = n_samples // config.TEST_BATCH_SIZE * config.TEST_BATCH_SIZE + result = defaultdict(dict) + wrong_count = 0 + adv_wrong_count = 0 + ori_correct_adv_wrong_count = 0 + ori_wrong_adv_wrong_count = 0 + if not os.path.exists(os.path.join(lmdb_path, 'adv_wrong_pred')): + os.mkdir(os.path.join(lmdb_path, 'adv_wrong_pred')) + if not os.path.exists(os.path.join(lmdb_path, 'ori_correct_adv_wrong_pred')): + os.mkdir(os.path.join(lmdb_path, 'ori_correct_adv_wrong_pred')) + if not os.path.exists(os.path.join(lmdb_path, 'ori_wrong_adv_wrong_pred')): + os.mkdir(os.path.join(lmdb_path, 'ori_wrong_adv_wrong_pred')) + + for index in range(n_samples): + index += 1 # lmdb starts with 1 + label_key = 'label-%09d'.encode() % index + label = txn.get(label_key).decode('utf-8').lower() + pred_key = 'pred-%09d'.encode() % index + pred = txn.get(pred_key).decode('utf-8') + if pred != label: + wrong_count += 1 + + adv_pred_key = 'adv_pred-%09d'.encode() % index + adv_pred = txn.get(adv_pred_key).decode('utf-8') + + adv_info_key = 'adv_info-%09d'.encode() % index + adv_info = json.loads(txn.get(adv_info_key).decode('utf-8')) + for info in adv_info: + if not result[info[0]]: + result[info[0]] = defaultdict(int) + result[info[0]]['count'] += 1 + + if adv_pred != label: + adv_wrong_count += 1 + for info in adv_info: + result[info[0]]['wrong_count'] += 1 + + # save wrong predicted image + adv_image = 'adv_image-%09d'.encode() % index + imgbuf = txn.get(adv_image) + image = Image.open(BytesIO(imgbuf)) + + result_path = os.path.join(lmdb_path, 'adv_wrong_pred', adv_info[0][0]) + if not os.path.exists(result_path): + os.mkdir(result_path) + + image.save(os.path.join(result_path, label + '-' + adv_pred + '.png')) + + # origin image is correctly predicted and adv is wrong. + if pred == label: + ori_correct_adv_wrong_count += 1 + result[info[0]]['ori_correct_adv_wrong_count'] += 1 + + result_path = os.path.join(lmdb_path, 'ori_correct_adv_wrong_pred', adv_info[0][0]) + if not os.path.exists(result_path): + os.mkdir(result_path) + image.save(os.path.join(result_path, label + '-' + adv_pred + '.png')) + # wrong predicted in both origin and adv image. 
+            else:
+                ori_wrong_adv_wrong_count += 1
+                result[info[0]]['ori_wrong_adv_wrong_count'] += 1
+
+                result_path = os.path.join(lmdb_path, 'ori_wrong_adv_wrong_pred', adv_info[0][0])
+                if not os.path.exists(result_path):
+                    os.mkdir(result_path)
+                image.save(os.path.join(result_path, label + '-' + adv_pred + '.png'))
+        print('Number of samples in analyse dataset: ', n_samples)
+        print('Accuracy of original dataset: ', 1 - wrong_count / n_samples)
+        print('Accuracy of adversarial dataset: ', 1 - adv_wrong_count / n_samples)
+        print('Number of samples correctly predicted in original dataset but wrong in adversarial dataset: ',
+              ori_correct_adv_wrong_count)
+        print('Number of samples wrongly predicted in both original and adversarial datasets: ', ori_wrong_adv_wrong_count)
+        print('------------------------------------------------------------------------------')
+        for key in result.keys():
+            print('Method ', key)
+            print('Number of perturbed samples: {} '.format(result[key]['count']))
+            print('Number of wrong predictions: {}'.format(result[key]['wrong_count']))
+            print('Number correctly predicted in original dataset but wrong in adversarial: {}'.format(
+                result[key]['ori_correct_adv_wrong_count']))
+            print('Number wrongly predicted in both original and adversarial datasets: {}'.format(
+                result[key]['ori_wrong_adv_wrong_count']))
+            print('------------------------------------------------------------------------------')
+    return result
+
+
+if __name__ == '__main__':
+    lmdb_data_path = config.ADV_TEST_DATASET_PATH
+    analyse_adv_iii5t_3000(lmdb_path=lmdb_data_path)
diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/README.md b/examples/natural_robustness/ocr_evaluate/cnn_ctc/README.md
new file mode 100644
index 0000000..7606795
--- /dev/null
+++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/README.md
@@ -0,0 +1,591 @@
+# Contents
+
+- [CNNCTC Description](#CNNCTC-description)
+- [Model Architecture](#model-architecture)
+- [Dataset](#dataset)
+- [Features](#features)
+    - [Mixed Precision](#mixed-precision)
+- [Environment Requirements](#environment-requirements)
+- [Quick Start](#quick-start)
+- [Script Description](#script-description)
+    - [Script and Sample Code](#script-and-sample-code)
+    - [Script Parameters](#script-parameters)
+    - [Training Process](#training-process)
+        - [Training](#training)
+        - [Distributed Training](#distributed-training)
+    - [Evaluation Process](#evaluation-process)
+        - [Evaluation](#evaluation)
+    - [Inference Process](#inference-process)
+        - [Export MindIR](#export-mindir)
+        - [Infer on Ascend310](#infer-on-ascend310)
+        - [Result](#result)
+- [Model Description](#model-description)
+    - [Performance](#performance)
+        - [Training Performance](#training-performance)
+        - [Evaluation Performance](#evaluation-performance)
+        - [Inference Performance](#inference-performance)
+    - [How to use](#how-to-use)
+        - [Inference](#inference)
+        - [Continue Training on the Pretrained Model](#continue-training-on-the-pretrained-model)
+- [ModelZoo Homepage](#modelzoo-homepage)
+
+# [CNNCTC Description](#contents)
+
+This paper proposes three major contributions to address scene text recognition (STR).
+First, we examine the inconsistencies between training and evaluation datasets, and the performance gap that results from them.
+Second, we introduce a unified four-stage STR framework that most existing STR models fit into.
+Using this framework allows for the extensive evaluation of previously proposed STR modules and the discovery of previously
+unexplored module combinations. Third, we analyze the module-wise contributions to performance in terms of accuracy, speed,
+and memory demand, under one consistent set of training and evaluation datasets. These analyses remove the obstacles in
+current comparisons and help clarify the performance gains of the existing modules.
+[Paper](https://arxiv.org/abs/1904.01906): J. Baek, G. Kim, J. Lee, S. Park, D. Han, S. Yun, S. J. Oh, and H. Lee, “What is wrong with scene text recognition model comparisons? dataset and model analysis,” ArXiv, vol. abs/1904.01906, 2019.
+
+# [Model Architecture](#contents)
+
+This is an example of training the CNN+CTC model for text recognition on the MJSynth and SynthText datasets with MindSpore.
+
+# [Dataset](#contents)
+
+Note that you can run the scripts with the datasets mentioned in the original paper or with datasets widely used in this domain. The following sections describe how to run the scripts with the datasets below.
+
+The [MJSynth](https://www.robots.ox.ac.uk/~vgg/data/text/) and [SynthText](https://github.com/ankush-me/SynthText) datasets are used for model training. The [IIIT 5K-word dataset](https://cvit.iiit.ac.in/research/projects/cvit-projects/the-iiit-5k-word-dataset) is used for evaluation.
+
+- step 1:
+
+All the datasets have been preprocessed and stored in .lmdb format and can be downloaded [**HERE**](https://drive.google.com/drive/folders/192UfE9agQUMNq6AgU3_E05_FcPZK4hyt).
+
+- step 2:
+
+Uncompress the downloaded file, and rename the MJSynth dataset to MJ, the SynthText dataset to ST, and the IIIT dataset to IIIT.
+
+- step 3:
+
+Move the three datasets mentioned above into the `cnnctc_data` folder; the structure should be as below:
+
+```text
+|--- CNNCTC/
+    |--- cnnctc_data/
+        |--- ST/
+            data.mdb
+            lock.mdb
+        |--- MJ/
+            data.mdb
+            lock.mdb
+        |--- IIIT/
+            data.mdb
+            lock.mdb
+
+        ......
+```
+
+- step 4:
+
+Preprocess the dataset by running:
+
+```bash
+python src/preprocess_dataset.py
+```
+
+This takes around 75 minutes.
+
+# [Features](#contents)
+
+## Mixed Precision
+
+The [mixed precision](https://www.mindspore.cn/docs/programming_guide/en/master/enable_mixed_precision.html) training method accelerates deep neural network training by using both single-precision and half-precision data formats while preserving the network accuracy achieved with single-precision training. Mixed precision training speeds up computation, reduces memory usage, and enables larger models or batch sizes to be trained on specific hardware.
+For FP16 operators, if the input data type is FP32, the MindSpore backend automatically handles it with reduced precision. Users can check the reduced-precision operators by enabling the INFO log and searching for 'reduce precision'.
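+
+As a minimal sketch (not part of this repository's scripts), mixed precision can be enabled through the `amp_level` argument of `mindspore.Model`. Here `net`, `loss` and `opt` are placeholders for an already-built network, loss function and optimizer, mirroring the continue-training example later in this README:
+
+```python
+from mindspore import Model
+
+# Minimal sketch: "O2" runs most layers in FP16; keep_batchnorm_fp32=False
+# matches the amp settings used in the training examples in this README.
+model = Model(net, loss_fn=loss, optimizer=opt,
+              amp_level="O2", keep_batchnorm_fp32=False)
+```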
+
+# [Environment Requirements](#contents)
+
+- Hardware (Ascend/GPU)
+
+    - Prepare a hardware environment with an Ascend or GPU processor.
+- Framework
+
+    - [MindSpore](https://www.mindspore.cn/install/en)
+- For more information, please check the resources below:
+    - [MindSpore tutorials](https://www.mindspore.cn/tutorials/en/master/index.html)
+
+    - [MindSpore Python API](https://www.mindspore.cn/docs/api/en/master/index.html)
+
+# [Quick Start](#contents)
+
+- Install dependencies:
+
+```bash
+pip install lmdb
+pip install Pillow
+pip install tqdm
+pip install six
+```
+
+```default_config.yaml
+
+TRAIN_DATASET_PATH: /home/DataSet/MJ-ST-IIIT/ST-MJ/
+TRAIN_DATASET_INDEX_PATH: /home/DataSet/MJ-ST-IIIT/st_mj_fixed_length_index_list.pkl
+TEST_DATASET_PATH: /home/DataSet/MJ-ST-IIIT/IIIT5K_3000
+
+Modify these parameters according to your actual paths.
+```
+
+- Standalone Ascend Training:
+
+```bash
+bash scripts/run_standalone_train_ascend.sh $DEVICE_ID $PRETRAINED_CKPT(options)
+# example: bash scripts/run_standalone_train_ascend.sh 0
+```
+
+- Standalone GPU Training:
+
+```bash
+bash scripts/run_standalone_train_gpu.sh $PRETRAINED_CKPT(options)
+```
+
+- Distributed Ascend Training:
+
+```bash
+bash scripts/run_distribute_train_ascend.sh $RANK_TABLE_FILE $PRETRAINED_CKPT(options)
+# example: bash scripts/run_distribute_train_ascend.sh ~/hccl_8p.json
+```
+
+- Distributed GPU Training:
+
+```bash
+bash scripts/run_distribute_train_gpu.sh $PRETRAINED_CKPT(options)
+```
+
+- Ascend Evaluation:
+
+```bash
+bash scripts/run_eval_ascend.sh $DEVICE_ID $TRAINED_CKPT
+# example: scripts/run_eval_ascend.sh 0 /home/model/cnnctc/ckpt/CNNCTC-1_8000.ckpt
+```
+
+- GPU Evaluation:
+
+```bash
+bash scripts/run_eval_gpu.sh $TRAINED_CKPT
+```
+
+# [Script Description](#contents)
+
+## [Script and Sample Code](#contents)
+
+The entire code structure is as follows:
+
+```text
+|--- CNNCTC/
+    |---README.md    // descriptions about cnnctc
+    |---README_CN.md    // descriptions about cnnctc (Chinese)
+    |---default_config.yaml    // config file
+    |---train.py    // train scripts
+    |---eval.py    // eval scripts
+    |---export.py    // export scripts
+    |---preprocess.py    // preprocess scripts
+    |---postprocess.py    // postprocess scripts
+    |---ascend310_infer    // application for 310 inference
+    |---scripts
+        |---run_infer_310.sh    // shell script for infer on ascend310
+        |---run_standalone_train_ascend.sh    // shell script for standalone on ascend
+        |---run_standalone_train_gpu.sh    // shell script for standalone on gpu
+        |---run_distribute_train_ascend.sh    // shell script for distributed on ascend
+        |---run_distribute_train_gpu.sh    // shell script for distributed on gpu
+        |---run_eval_ascend.sh    // shell script for eval on ascend
+    |---src
+        |---__init__.py    // init file
+        |---cnn_ctc.py    // cnn_ctc network
+        |---callback.py    // loss callback file
+        |---dataset.py    // process dataset
+        |---util.py    // routine operation
+        |---preprocess_dataset.py    // preprocess dataset
+        |--- model_utils
+            |---config.py    // Parameter config
+            |---moxing_adapter.py    // modelarts device configuration
+            |---device_adapter.py    // Device Config
+            |---local_adapter.py    // local device config
+```
+
+## [Script Parameters](#contents)
+
+Parameters for both training and evaluation can be set in `default_config.yaml`.
+
+Arguments:
+
+- `--CHARACTER`: Character labels.
+- `--NUM_CLASS`: The number of classes, including all character labels and the blank label for CTCLoss.
+- `--HIDDEN_SIZE`: Model hidden size.
+- `--FINAL_FEATURE_WIDTH`: The width (number of time steps) of the final feature sequence.
+- `--IMG_H`: The height of input images.
+- `--IMG_W`: The width of input images.
+- `--TRAIN_DATASET_PATH`: The path to the training dataset.
+- `--TRAIN_DATASET_INDEX_PATH`: The path to the training dataset index file, which determines the sample order.
+- `--TRAIN_BATCH_SIZE`: Training batch size. The batch size and index file must ensure the input data has a fixed shape.
+- `--TRAIN_DATASET_SIZE`: Training dataset size.
+- `--TEST_DATASET_PATH`: The path to the test dataset.
+- `--TEST_BATCH_SIZE`: Test batch size.
+- `--TRAIN_EPOCHS`: Total training epochs.
+- `--CKPT_PATH`: The path to a model checkpoint file; can be used to resume training and for evaluation.
+- `--SAVE_PATH`: The path for saving model checkpoint files.
+- `--LR`: Learning rate for standalone training.
+- `--LR_PARA`: Learning rate for distributed training.
+- `--MOMENTUM`: Momentum.
+- `--LOSS_SCALE`: Loss scale to prevent gradient underflow.
+- `--SAVE_CKPT_PER_N_STEP`: Save a model checkpoint file every N steps.
+- `--KEEP_CKPT_MAX_NUM`: The maximum number of saved model checkpoint files.
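+
+For orientation, an illustrative `default_config.yaml` excerpt is shown below. The paths mirror the Quick Start example, and the batch size and epoch count match those reported in the performance tables later in this README; all values are placeholders to adapt to your environment:
+
+```yaml
+TRAIN_DATASET_PATH: /home/DataSet/MJ-ST-IIIT/ST-MJ/
+TRAIN_DATASET_INDEX_PATH: /home/DataSet/MJ-ST-IIIT/st_mj_fixed_length_index_list.pkl
+TEST_DATASET_PATH: /home/DataSet/MJ-ST-IIIT/IIIT5K_3000
+TRAIN_BATCH_SIZE: 192    # must be consistent with the fixed-shape index file
+TEST_BATCH_SIZE: 192
+TRAIN_EPOCHS: 3
+```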
+
+## [Training Process](#contents)
+
+### Training
+
+- Standalone Ascend Training:
+
+```bash
+bash scripts/run_standalone_train_ascend.sh [DEVICE_ID] [PRETRAINED_CKPT(options)]
+# example: bash scripts/run_standalone_train_ascend.sh 0
+```
+
+Results and checkpoints are written to the `./train` folder. The log can be found in `./train/log` and loss values are recorded in `./train/loss.log`.
+
+`$PRETRAINED_CKPT` is the path to a model checkpoint and is **optional**. If none is given, the model will be trained from scratch.
+
+- Distributed Ascend Training:
+
+```bash
+bash scripts/run_distribute_train_ascend.sh [RANK_TABLE_FILE] [PRETRAINED_CKPT(options)]
+# example: bash scripts/run_distribute_train_ascend.sh ~/hccl_8p.json
+```
+
+    For distributed training, an hccl configuration file in JSON format needs to be created in advance.
+
+    Please follow the instructions in the link below:
+
+    <https://gitee.com/mindspore/models/tree/master/utils/hccl_tools>.
+
+Results and checkpoints are written to the `./train_parallel_{i}` folder for each device `i`.
+The log can be found in `./train_parallel_{i}/log_{i}.log` and loss values are recorded in `./train_parallel_{i}/loss.log`.
+
+`$RANK_TABLE_FILE` is needed when you run a distributed task on Ascend.
+`$PRETRAINED_CKPT` is the path to a model checkpoint and is **optional**. If none is given, the model will be trained from scratch.
+
+### Training Result
+
+Training results are stored in the example path, in folders whose names begin with "train" or "train_parallel". You can find checkpoint files together with results like the following in loss.log.
+
+```text
+# distribute training result(8p)
+epoch: 1 step: 1 , loss is 76.25, average time per step is 0.235177839748392712
+epoch: 1 step: 2 , loss is 73.46875, average time per step is 0.25798572540283203
+epoch: 1 step: 3 , loss is 69.46875, average time per step is 0.229678678512573
+epoch: 1 step: 4 , loss is 64.3125, average time per step is 0.23512671788533527
+epoch: 1 step: 5 , loss is 58.375, average time per step is 0.23149147033691406
+epoch: 1 step: 6 , loss is 52.7265625, average time per step is 0.2292975425720215
+...
+epoch: 1 step: 8689 , loss is 9.706798802612482, average time per step is 0.2184656601312549
+epoch: 1 step: 8690 , loss is 9.70612545289855, average time per step is 0.2184725407765116
+epoch: 1 step: 8691 , loss is 9.70695776049204, average time per step is 0.21847309686135555
+epoch: 1 step: 8692 , loss is 9.707279624277456, average time per step is 0.21847339290613375
+epoch: 1 step: 8693 , loss is 9.70763437950938, average time per step is 0.2184720295013031
+epoch: 1 step: 8694 , loss is 9.707695425072046, average time per step is 0.21847410284595573
+epoch: 1 step: 8695 , loss is 9.708408273381295, average time per step is 0.21847338271072345
+epoch: 1 step: 8696 , loss is 9.708703753591953, average time per step is 0.2184726025560777
+epoch: 1 step: 8697 , loss is 9.709536406025824, average time per step is 0.21847212061114694
+epoch: 1 step: 8698 , loss is 9.708542263610315, average time per step is 0.2184715309307257
+```
+
+- Running on ModelArts
+- If you want to train the model on ModelArts, you can refer to the ModelArts [official guidance document](https://support.huaweicloud.com/modelarts/)
+
+```python
+# Example of distributed training of cnnctc on ModelArts:
+# Dataset storage layout
+
+# ├── CNNCTC_Data         # dataset dir
+#    ├── train            # train dir
+#      ├── ST_MJ          # train dataset dir
+#        ├── data.mdb     # data file
+#        ├── lock.mdb
+#      ├── st_mj_fixed_length_index_list.pkl
+#    ├── eval             # eval dir
+#      ├── IIIT5K_3000    # eval dataset dir
+#      ├── checkpoint     # checkpoint dir
+
+# (1) Choose either a (modify the yaml file parameters) or b (create a ModelArts training job and modify parameters there).
+# a. set "enable_modelarts=True"
+#    set "run_distribute=True"
+#    set "TRAIN_DATASET_PATH=/cache/data/ST_MJ/"
+#    set "TRAIN_DATASET_INDEX_PATH=/cache/data/st_mj_fixed_length_index_list.pkl"
+#    set "SAVE_PATH=/cache/train/checkpoint"
+#
+# b. Add "enable_modelarts=True" on the ModelArts UI.
+#    Set the parameters required by method a on the ModelArts UI.
+#    Note: Path parameters do not need to be quoted.
+
+# (2) Set the path of the network configuration file "_config_path=/The path of config in default_config.yaml/"
+# (3) Set the code path on the ModelArts UI "/path/cnnctc".
+# (4) Set the model's startup file on the ModelArts UI "train.py".
+# (5) Set the data path of the model on the ModelArts UI ".../CNNCTC_Data/train" (choose the CNNCTC_Data/train folder path),
+#     the output path of the model "Output file path", and the log path of the model "Job log path".
+# (6) Start training the model.
+
+# Example of using model inference on ModelArts
+# (1) Place the trained model in the corresponding location of the bucket.
+# (2) Choose either a or b.
+# a. set "enable_modelarts=True"
+#    set "TEST_DATASET_PATH=/cache/data/IIIT5K_3000/"
+#    set "CHECKPOINT_PATH=/cache/data/checkpoint/checkpoint file name"
+
+# b. Add "enable_modelarts=True" on the ModelArts UI.
Add "enable_modelarts=True" parameter on the interface of modearts。 +# Set the parameters required by method a on the modelarts interface +# Note: The path parameter does not need to be quoted + +# (3) Set the path of the network configuration file "_config_path=/The path of config in default_config.yaml/" +# (4) Set the code path on the modelarts interface "/path/cnnctc"。 +# (5) Set the model's startup file on the modelarts interface "train.py" 。 +# (6) Set the data path of the model on the modelarts interface ".../CNNCTC_Data/train"(choices CNNCTC_Data/train Folder path) , +# The output path of the model "Output file path" and the log path of the model "Job log path" 。 +# (7) Start model inference。 +``` + +- Standalone GPU Training: + +```bash +bash scripts/run_standalone_train_gpu.sh [PRETRAINED_CKPT(options)] +``` + +Results and checkpoints are written to `./train` folder. Log can be found in `./train/log` and loss values are recorded in `./train/loss.log`. + +`$PRETRAINED_CKPT` is the path to model checkpoint and it is **optional**. If none is given the model will be trained from scratch. + +- Distributed GPU Training: + +```bash +bash scripts/run_distribute_train_gpu.sh [PRETRAINED_CKPT(options)] +``` + +Results and checkpoints are written to `./train_parallel` folder with model checkpoints in ckpt_{i} directories. +Log can be found in `./train_parallel/log` and loss values are recorded in `./train_parallel/loss.log`. + +## [Evaluation Process](#contents) + +### Evaluation + +- Ascend Evaluation: + +```bash +bash scripts/run_eval_ascend.sh [DEVICE_ID] [TRAINED_CKPT] +# example: scripts/run_eval_ascend.sh 0 /home/model/cnnctc/ckpt/CNNCTC-1_8000.ckpt +``` + +The model will be evaluated on the IIIT dataset, sample results and overall accuracy will be printed. + +- GPU Evaluation: + +```bash +bash scripts/run_eval_gpu.sh [TRAINED_CKPT] +``` + +## [Inference process](#contents) + +### Export MindIR + +```shell +python export.py --ckpt_file [CKPT_PATH] --file_format [EXPORT_FORMAT] --TEST_BATCH_SIZE [BATCH_SIZE] +``` + +The ckpt_file parameter is required, +`EXPORT_FORMAT` should be in ["AIR", "MINDIR"]. +`BATCH_SIZE` current batch_size can only be set to 1. + +- Export MindIR on Modelarts + +```Modelarts +Export MindIR example on ModelArts +Data storage method is the same as training +# (1) Choose either a (modify yaml file parameters) or b (modelArts create training job to modify parameters)。 +# a. set "enable_modelarts=True" +# set "file_name=cnnctc" +# set "file_format=MINDIR" +# set "ckpt_file=/cache/data/checkpoint file name" + +# b. Add "enable_modelarts=True" parameter on the interface of modearts。 +# Set the parameters required by method a on the modelarts interface +# Note: The path parameter does not need to be quoted +# (2)Set the path of the network configuration file "_config_path=/The path of config in default_config.yaml/" +# (3) Set the code path on the modelarts interface "/path/cnnctc"。 +# (4) Set the model's startup file on the modelarts interface "export.py" 。 +# (5) Set the data path of the model on the modelarts interface ".../CNNCTC_Data/eval/checkpoint"(choices CNNCTC_Data/eval/checkpoint Folder path) , +# The output path of the model "Output file path" and the log path of the model "Job log path" 。 +``` + +### Infer on Ascend310 + +Before performing inference, the mindir file must be exported by `export.py` script. We only provide an example of inference using MINDIR model. 
+
+### Infer on Ascend310
+
+Before performing inference, the MindIR file must be exported with the `export.py` script. We only provide an example of inference using a MINDIR model.
+
+```shell
+# Ascend310 inference
+bash run_infer_310.sh [MINDIR_PATH] [DATA_PATH] [DVPP] [DEVICE_ID]
+```
+
+- `DVPP` is mandatory and must be chosen from ["DVPP", "CPU"]; it is case-insensitive. CNNCTC only supports CPU mode.
+- `DEVICE_ID` is optional; the default value is 0.
+
+### Result
+
+- Ascend Result
+
+The inference result is saved in the current path; you can find a result like this in the acc.log file.
+
+```bash
+'Accuracy': 0.8642
+```
+
+- GPU Result
+
+The inference result is saved in ./eval/log; you can find a result like this.
+
+```bash
+accuracy: 0.8533
+```
+
+# [Model Description](#contents)
+
+## [Performance](#contents)
+
+### Training Performance
+
+| Parameters                 | CNNCTC                                                      |
+| -------------------------- | ----------------------------------------------------------- |
+| Model Version              | V1                                                          |
+| Resource                   | Ascend 910; CPU 2.60GHz, 192cores; Memory 755G; OS Euler2.8 |
+| Uploaded Date              | 09/28/2020 (month/day/year)                                 |
+| MindSpore Version          | 1.0.0                                                       |
+| Dataset                    | MJSynth, SynthText                                          |
+| Training Parameters        | epoch=3, batch_size=192                                     |
+| Optimizer                  | RMSProp                                                     |
+| Loss Function              | CTCLoss                                                     |
+| Speed                      | 1pc: 250 ms/step; 8pcs: 260 ms/step                         |
+| Total time                 | 1pc: 15 hours; 8pcs: 1.92 hours                             |
+| Parameters (M)             | 177                                                         |
+| Scripts                    |                                                             |
+
+| Parameters                 | CNNCTC                                                      |
+| -------------------------- | ----------------------------------------------------------- |
+| Model Version              | V1                                                          |
+| Resource                   | GPU (Tesla V100-PCIE); CPU 2.60 GHz, 26 cores; Memory 790G; OS linux-gnu |
+| Uploaded Date              | 07/06/2021 (month/day/year)                                 |
+| MindSpore Version          | 1.0.0                                                       |
+| Dataset                    | MJSynth, SynthText                                          |
+| Training Parameters        | epoch=3, batch_size=192                                     |
+| Optimizer                  | RMSProp                                                     |
+| Loss Function              | CTCLoss                                                     |
+| Speed                      | 1pc: 1180 ms/step; 8pcs: 1180 ms/step                       |
+| Total time                 | 1pc: 62.9 hours; 8pcs: 8.67 hours                           |
+| Parameters (M)             | 177                                                         |
+| Scripts                    |                                                             |
+
+### Evaluation Performance
+
+| Parameters          | CNNCTC                      |
+| ------------------- | --------------------------- |
+| Model Version       | V1                          |
+| Resource            | Ascend 910; OS Euler2.8     |
+| Uploaded Date       | 09/28/2020 (month/day/year) |
+| MindSpore Version   | 1.0.0                       |
+| Dataset             | IIIT5K                      |
+| batch_size          | 192                         |
+| outputs             | Accuracy                    |
+| Accuracy            | 85%                         |
+| Model for inference | 675M (.ckpt file)           |
+
+### Inference Performance
+
+| Parameters          | Ascend                      |
+| ------------------- | --------------------------- |
+| Model Version       | CNNCTC                      |
+| Resource            | Ascend 310; CentOS 3.10     |
+| Uploaded Date       | 05/19/2021 (month/day/year) |
+| MindSpore Version   | 1.2.0                       |
+| Dataset             | IIIT5K                      |
+| batch_size          | 1                           |
+| outputs             | Accuracy                    |
+| Accuracy            | Accuracy=0.8642             |
+| Model for inference | 675M (.ckpt file)           |
+
+## [How to use](#contents)
+
+### Inference
+
+If you need to use the trained model for inference on multiple hardware platforms, such as GPU, Ascend 910 or Ascend 310, you can refer to this [Link](https://www.mindspore.cn/docs/programming_guide/en/master/multi_platform_inference.html).
+The following is a simple example:
+
+- Running on Ascend
+
+  ```python
+  # Set context
+  context.set_context(mode=context.GRAPH_MODE, device_target=cfg.device_target)
+  context.set_context(device_id=cfg.device_id)
+
+  # Load unseen dataset for inference
+  dataset = create_dataset(cfg.data_path, 1, False)
+
+  # Define model
+  net = CNNCTC(cfg.NUM_CLASS, cfg.HIDDEN_SIZE, cfg.FINAL_FEATURE_WIDTH)
+  opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01,
+                 cfg.momentum, weight_decay=cfg.weight_decay)
+  loss = P.CTCLoss(preprocess_collapse_repeated=False,
+                   ctc_merge_repeated=True,
+                   ignore_longer_outputs_than_inputs=False)
+  model = Model(net, loss_fn=loss, optimizer=opt, metrics={'acc'})
+
+  # Load pre-trained model
+  param_dict = load_checkpoint(cfg.checkpoint_path)
+  load_param_into_net(net, param_dict)
+  net.set_train(False)
+
+  # Make predictions on the unseen dataset
+  acc = model.eval(dataset)
+  print("accuracy: ", acc)
+  ```
+
+### Continue Training on the Pretrained Model
+
+- Running on Ascend
+
+  ```python
+  # Load dataset
+  dataset = create_dataset(cfg.data_path, 1)
+  batch_num = dataset.get_dataset_size()
+
+  # Define model
+  net = CNNCTC(cfg.NUM_CLASS, cfg.HIDDEN_SIZE, cfg.FINAL_FEATURE_WIDTH)
+  # Continue training if pre_trained is set to True
+  if cfg.pre_trained:
+      param_dict = load_checkpoint(cfg.checkpoint_path)
+      load_param_into_net(net, param_dict)
+  lr = lr_steps(0, lr_max=cfg.lr_init, total_epochs=cfg.epoch_size,
+                steps_per_epoch=batch_num)
+  opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()),
+                 Tensor(lr), cfg.momentum, weight_decay=cfg.weight_decay)
+  loss = P.CTCLoss(preprocess_collapse_repeated=False,
+                   ctc_merge_repeated=True,
+                   ignore_longer_outputs_than_inputs=False)
+  model = Model(net, loss_fn=loss, optimizer=opt, metrics={'acc'},
+                amp_level="O2", keep_batchnorm_fp32=False, loss_scale_manager=None)
+
+  # Set callbacks
+  config_ck = CheckpointConfig(save_checkpoint_steps=batch_num * 5,
+                               keep_checkpoint_max=cfg.keep_checkpoint_max)
+  time_cb = TimeMonitor(data_size=batch_num)
+  ckpoint_cb = ModelCheckpoint(prefix="train_cnnctc", directory="./",
+                               config=config_ck)
+  loss_cb = LossMonitor()
+
+  # Start training
+  model.train(cfg.epoch_size, dataset, callbacks=[time_cb, ckpoint_cb, loss_cb])
+  print("train success")
+  ```
+
+# [ModelZoo Homepage](#contents)
+
+ Please check the official [homepage](https://gitee.com/mindspore/models).
diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/README_CN.md b/examples/natural_robustness/ocr_evaluate/cnn_ctc/README_CN.md
new file mode 100644
index 0000000..31b0e62
--- /dev/null
+++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/README_CN.md
@@ -0,0 +1,523 @@
+# 目录
+
+
+
+- [目录](#目录)
+- [CNN+CTC描述](#cnnctc描述)
+- [模型架构](#模型架构)
+- [数据集](#数据集)
+- [特性](#特性)
+    - [混合精度](#混合精度)
+- [环境要求](#环境要求)
+- [快速入门](#快速入门)
+- [脚本说明](#脚本说明)
+    - [脚本及样例代码](#脚本及样例代码)
+    - [脚本参数](#脚本参数)
+    - [训练过程](#训练过程)
+        - [训练](#训练)
+        - [训练结果](#训练结果)
+    - [评估过程](#评估过程)
+        - [评估](#评估)
+    - [推理过程](#推理过程)
+        - [导出MindIR](#导出mindir)
+        - [在Ascend310执行推理](#在ascend310执行推理)
+        - [结果](#结果)
+- [模型描述](#模型描述)
+    - [性能](#性能)
+        - [训练性能](#训练性能)
+        - [评估性能](#评估性能)
+        - [推理性能](#推理性能)
+    - [用法](#用法)
+        - [推理](#推理)
+        - [在预训练模型上继续训练](#在预训练模型上继续训练)
+- [ModelZoo主页](#modelzoo主页)
+
+
+
+# CNN+CTC描述
+
+本文描述了对场景文本识别(STR)的三个主要贡献。
+首先检查训练和评估数据集不一致的内容,以及导致的性能差距。
+再引入一个统一的四阶段STR框架,目前大多数STR模型都能够适应这个框架。
+使用这个框架可以广泛评估以前提出的STR模块,并发现以前未开发的模块组合。
+第三,分析在一致的训练和评估数据集下,模块对性能的贡献,包括准确率、速度和内存需求。
+这些分析清除了当前比较的障碍,有助于了解现有模块的性能增益。
+
+[论文](https://arxiv.org/abs/1904.01906): J. Baek, G. Kim, J. Lee, S. Park, D. Han, S. Yun, S. J. Oh, and H. Lee, “What is wrong with scene text recognition model comparisons? dataset and model analysis,” ArXiv, vol. abs/1904.01906, 2019.
+
+# 模型架构
+
+示例:在MindSpore上使用MJSynth和SynthText数据集训练CNN+CTC模型进行文本识别。
+
+# 数据集
+
+[MJSynth](https://www.robots.ox.ac.uk/~vgg/data/text/)和[SynthText](https://github.com/ankush-me/SynthText)数据集用于模型训练。[The IIIT 5K-word dataset](https://cvit.iiit.ac.in/research/projects/cvit-projects/the-iiit-5k-word-dataset)数据集用于评估。
+
+- 步骤1:
+
+所有数据集均经过预处理,以.lmdb格式存储,点击[**此处**](https://drive.google.com/drive/folders/192UfE9agQUMNq6AgU3_E05_FcPZK4hyt)可下载。
+
+- 步骤2:
+
+解压下载的文件,重命名MJSynth数据集为MJ,SynthText数据集为ST,IIIT数据集为IIIT。
+
+- 步骤3:
+
+将上述三个数据集移至`cnnctc_data`文件夹中,结构如下:
+
+```python
+|--- CNNCTC/
+    |--- cnnctc_data/
+        |--- ST/
+            data.mdb
+            lock.mdb
+        |--- MJ/
+            data.mdb
+            lock.mdb
+        |--- IIIT/
+            data.mdb
+            lock.mdb
+
+        ......
+``` + +- 步骤4: + +预处理数据集: + +```shell +python src/preprocess_dataset.py +``` + +这大约需要75分钟。 + +# 特性 + +## 混合精度 + +采用[混合精度](https://www.mindspore.cn/docs/programming_guide/zh-CN/master/enable_mixed_precision.html)的训练方法使用支持单精度和半精度数据来提高深度学习神经网络的训练速度,同时保持单精度训练所能达到的网络精度。混合精度训练提高计算速度、减少内存使用的同时,支持在特定硬件上训练更大的模型或实现更大批次的训练。 +以FP16算子为例,如果输入数据类型为FP32,MindSpore后台会自动降低精度来处理数据。用户可打开INFO日志,搜索“reduce precision”查看精度降低的算子。 + +# 环境要求 + +- 硬件(Ascend) + + - 准备Ascend或GPU处理器搭建硬件环境。 + +- 框架 + + - [MindSpore](https://www.mindspore.cn/install) + +- 如需查看详情,请参见如下资源: + - [MindSpore教程](https://www.mindspore.cn/tutorials/zh-CN/master/index.html) + + - [MindSpore Python API](https://www.mindspore.cn/docs/api/zh-CN/master/index.html) + +# 快速入门 + +- 安装依赖: + +```python +pip install lmdb +pip install Pillow +pip install tqdm +pip install six +``` + +```default_config.yaml + +TRAIN_DATASET_PATH: /home/DataSet/MJ-ST-IIIT/ST-MJ/ +TRAIN_DATASET_INDEX_PATH: /home/DataSet/MJ-ST-IIIT/st_mj_fixed_length_index_list.pkl +TEST_DATASET_PATH: /home/DataSet/MJ-ST-IIIT/IIIT5K_3000 + +根据实际路径修改参数 +``` + +- 单机训练: + +```shell +bash scripts/run_standalone_train_ascend.sh [DEVICE_ID] [PRETRAINED_CKPT(options)] +# example: bash scripts/run_standalone_train_ascend.sh 0 +``` + +- 分布式训练: + +```shell +bash scripts/run_distribute_train_ascend.sh [RANK_TABLE_FILE] [PRETRAINED_CKPT(options)] +# example: bash scripts/run_distribute_train_ascend.sh ~/hccl_8p.json +``` + +- 评估: + +```shell +bash scripts/run_eval_ascend.sh DEVICE_ID TRAINED_CKPT +# example: scripts/run_eval_ascend.sh 0 /home/model/cnnctc/ckpt/CNNCTC-1_8000.ckpt +``` + +# 脚本说明 + +## 脚本及样例代码 + +完整代码结构如下: + +```python +|--- CNNCTC/ + |---README_CN.md // CNN+CTC相关描述 + |---README.md // CNN+CTC相关描述 + |---train.py // 训练脚本 + |---eval.py // 评估脚本 + |---export.py // 模型导出脚本 + |---postprocess.py // 推理后处理脚本 + |---preprocess.py // 推理前处理脚本 + |---ascend310_infer // 用于310推理 + |---default_config.yaml // 参数配置 + |---scripts + |---run_standalone_train_ascend.sh // Ascend单机shell脚本 + |---run_distribute_train_ascend.sh // Ascend分布式shell脚本 + |---run_eval_ascend.sh // Ascend评估shell脚本 + |---run_infer_310.sh // Ascend310推理的shell脚本 + |---src + |---__init__.py // init文件 + |---cnn_ctc.py // cnn_ctc网络 + |---callback.py // 损失回调文件 + |---dataset.py // 处理数据集 + |---util.py // 常规操作 + |---generate_hccn_file.py // 生成分布式json文件 + |---preprocess_dataset.py // 预处理数据集 + |---model_utils + |---config.py # 参数生成 + |---device_adapter.py # 设备相关信息 + |---local_adapter.py # 设备相关信息 + |---moxing_adapter.py # 装饰器(主要用于ModelArts数据拷贝) + +``` + +## 脚本参数 + +在`default_config.yaml`中可以同时配置训练参数和评估参数。 + +参数: + +- `--CHARACTER`:字符标签。 +- `--NUM_CLASS`:类别数,包含所有字符标签和CTCLoss的标签。 +- `--HIDDEN_SIZE`:模型隐藏大小。 +- `--FINAL_FEATURE_WIDTH`:特性的数量。 +- `--IMG_H`:输入图像高度。 +- `--IMG_W`:输入图像宽度。 +- `--TRAIN_DATASET_PATH`:训练数据集的路径。 +- `--TRAIN_DATASET_INDEX_PATH`:决定顺序的训练数据集索引文件的路径。 +- `--TRAIN_BATCH_SIZE`:训练批次大小。在批次大小和索引文件中,必须确保输入数据是固定的形状。 +- `--TRAIN_DATASET_SIZE`:训练数据集大小。 +- `--TEST_DATASET_PATH`:测试数据集的路径。 +- `--TEST_BATCH_SIZE`:测试批次大小。 +- `--TRAIN_EPOCHS`:总训练轮次。 +- `--CKPT_PATH`:模型检查点文件路径,可用于恢复训练和评估。 +- `--SAVE_PATH`:模型检查点文件保存路径。 +- `--LR`:单机训练学习率。 +- `--LR_PARA`:分布式训练学习率。 +- `--Momentum`:动量。 +- `--LOSS_SCALE`:损失放大,避免梯度下溢。 +- `--SAVE_CKPT_PER_N_STEP`:每N步保存模型检查点文件。 +- `--KEEP_CKPT_MAX_NUM`:模型检查点文件保存数量上限。 + +## 训练过程 + +### 训练 + +- 单机训练: + +```shell +bash scripts/run_standalone_train_ascend.sh [DEVICE_ID] [PRETRAINED_CKPT(options)] +# example: bash scripts/run_standalone_train_ascend.sh 0 +``` + +结果和检查点被写入`./train`文件夹。日志可以在`./train/log`中找到,损失值记录在`./train/loss.log`中。 + 
+`$PRETRAINED_CKPT`为模型检查点的路径,**可选**。如果值为none,模型将从头开始训练。 + +- 分布式训练: + +```shell +bash scripts/run_distribute_train_ascend.sh [RANK_TABLE_FILE] [PRETRAINED_CKPT(options)] +# example: bash scripts/run_distribute_train_ascend.sh ~/hccl_8p.json +``` + +结果和检查点分别写入设备`i`的`./train_parallel_{i}`文件夹。 +日志可以在`./train_parallel_{i}/log_{i}.log`中找到,损失值记录在`./train_parallel_{i}/loss.log`中。 + +在Ascend上运行分布式任务时需要`$RANK_TABLE_FILE`。 +`$PATH_TO_CHECKPOINT`为模型检查点的路径,**可选**。如果值为none,模型将从头开始训练。 + +> 注意: + + RANK_TABLE_FILE相关参考资料见[链接](https://www.mindspore.cn/docs/programming_guide/zh-CN/master/distributed_training_ascend.html), 获取device_ip方法详见[链接](https://gitee.com/mindspore/models/tree/master/utils/hccl_tools). + +### 训练结果 + +训练结果保存在示例路径中,文件夹名称以“train”或“train_parallel”开头。您可在此路径下的日志中找到检查点文件以及结果,如下所示。 + +```python +# 分布式训练结果(8P) +epoch: 1 step: 1 , loss is 76.25, average time per step is 0.335177839748392712 +epoch: 1 step: 2 , loss is 73.46875, average time per step is 0.36798572540283203 +epoch: 1 step: 3 , loss is 69.46875, average time per step is 0.3429678678512573 +epoch: 1 step: 4 , loss is 64.3125, average time per step is 0.33512671788533527 +epoch: 1 step: 5 , loss is 58.375, average time per step is 0.33149147033691406 +epoch: 1 step: 6 , loss is 52.7265625, average time per step is 0.3292975425720215 +... +epoch: 1 step: 8689 , loss is 9.706798802612482, average time per step is 0.3184656601312549 +epoch: 1 step: 8690 , loss is 9.70612545289855, average time per step is 0.3184725407765116 +epoch: 1 step: 8691 , loss is 9.70695776049204, average time per step is 0.31847309686135555 +epoch: 1 step: 8692 , loss is 9.707279624277456, average time per step is 0.31847339290613375 +epoch: 1 step: 8693 , loss is 9.70763437950938, average time per step is 0.3184720295013031 +epoch: 1 step: 8694 , loss is 9.707695425072046, average time per step is 0.31847410284595573 +epoch: 1 step: 8695 , loss is 9.708408273381295, average time per step is 0.31847338271072345 +epoch: 1 step: 8696 , loss is 9.708703753591953, average time per step is 0.3184726025560777 +epoch: 1 step: 8697 , loss is 9.709536406025824, average time per step is 0.31847212061114694 +epoch: 1 step: 8698 , loss is 9.708542263610315, average time per step is 0.3184715309307257 +``` + +## 评估过程 + +### 评估 + +- 评估: + +```shell +bash scripts/run_eval_ascend.sh [DEVICE_ID] [TRAINED_CKPT] +# example: scripts/run_eval_ascend.sh 0 /home/model/cnnctc/ckpt/CNNCTC-1_8000.ckpt +``` + +在IIIT数据集上评估模型,并打印样本结果和总准确率。 + +- 如果要在modelarts上进行模型的训练,可以参考modelarts的[官方指导文档](https://support.huaweicloud.com/modelarts/) 开始进行模型的训练和推理,具体操作如下: + +```ModelArts +# 在ModelArts上使用分布式训练示例: +# 数据集存放方式 + +# ├── CNNCTC_Data # dataset dir +# ├──train # train dir +# ├── ST_MJ # train dataset dir +# ├── data.mdb # data file +# ├── lock.mdb +# ├── st_mj_fixed_length_index_list.pkl +# ├── eval # eval dir +# ├── IIIT5K_3000 # eval dataset dir +# ├── checkpoint # checkpoint dir + +# (1) 选择a(修改yaml文件参数)或者b(ModelArts创建训练作业修改参数)其中一种方式。 +# a. 设置 "enable_modelarts=True" +# 设置 "run_distribute=True" +# 设置 "TRAIN_DATASET_PATH=/cache/data/ST_MJ/" +# 设置 "TRAIN_DATASET_INDEX_PATH=/cache/data/st_mj_fixed_length_index_list.pkl" +# 设置 "SAVE_PATH=/cache/train/checkpoint" + +# b. 
增加 "enable_modelarts=True" 参数在modearts的界面上。 +# 在modelarts的界面上设置方法a所需要的参数 +# 注意:路径参数不需要加引号 + +# (2)设置网络配置文件的路径 "_config_path=/The path of config in default_config.yaml/" +# (3) 在modelarts的界面上设置代码的路径 "/path/cnnctc"。 +# (4) 在modelarts的界面上设置模型的启动文件 "train.py" 。 +# (5) 在modelarts的界面上设置模型的数据路径 ".../CNNCTC_Data/train"(选择CNNCTC_Data/train文件夹路径) , +# 模型的输出路径"Output file path" 和模型的日志路径 "Job log path" 。 +# (6) 开始模型的训练。 + +# 在modelarts上使用模型推理的示例 +# (1) 把训练好的模型地方到桶的对应位置。 +# (2) 选择a或者b其中一种方式。 +# a.设置 "enable_modelarts=True" +# 设置 "TEST_DATASET_PATH=/cache/data/IIIT5K_3000/" +# 设置 "CHECKPOINT_PATH=/cache/data/checkpoint/checkpoint file name" + +# b. 增加 "enable_modelarts=True" 参数在modearts的界面上。 +# 在modelarts的界面上设置方法a所需要的参数 +# 注意:路径参数不需要加引号 + +# (3) 设置网络配置文件的路径 "_config_path=/The path of config in default_config.yaml/" +# (4) 在modelarts的界面上设置代码的路径 "/path/cnnctc"。 +# (5) 在modelarts的界面上设置模型的启动文件 "eval.py" 。 +# (6) 在modelarts的界面上设置模型的数据路径 "../CNNCTC_Data/eval"(选择CNNCTC_Data/eval文件夹路径) , +# 模型的输出路径"Output file path" 和模型的日志路径 "Job log path" 。 +# (7) 开始模型的推理。 +``` + +## 推理过程 + +### 导出MindIR + +```shell +python export.py --ckpt_file [CKPT_PATH] --file_format [EXPORT_FORMAT] --TEST_BATCH_SIZE [BATCH_SIZE] +``` + +参数ckpt_file为必填项, +`EXPORT_FORMAT` 可选 ["AIR", "MINDIR"]. +`BATCH_SIZE` 目前仅支持batch_size为1的推理. + +- 在modelarts上导出MindIR + +```Modelarts +在ModelArts上导出MindIR示例 +数据集存放方式同Modelart训练 +# (1) 选择a(修改yaml文件参数)或者b(ModelArts创建训练作业修改参数)其中一种方式。 +# a. 设置 "enable_modelarts=True" +# 设置 "file_name=cnnctc" +# 设置 "file_format=MINDIR" +# 设置 "ckpt_file=/cache/data/checkpoint file name" + +# b. 增加 "enable_modelarts=True" 参数在modearts的界面上。 +# 在modelarts的界面上设置方法a所需要的参数 +# 注意:路径参数不需要加引号 +# (2)设置网络配置文件的路径 "_config_path=/The path of config in default_config.yaml/" +# (3) 在modelarts的界面上设置代码的路径 "/path/cnnctc"。 +# (4) 在modelarts的界面上设置模型的启动文件 "export.py" 。 +# (5) 在modelarts的界面上设置模型的数据路径 ".../CNNCTC_Data/eval/checkpoint"(选择CNNCTC_Data/eval/checkpoint文件夹路径) , +# MindIR的输出路径"Output file path" 和模型的日志路径 "Job log path" 。 +``` + +### 在Ascend310执行推理 + +在执行推理前,mindir文件必须通过`export.py`脚本导出。以下展示了使用mindir模型执行推理的示例。 + +```shell +# Ascend310 inference +bash run_infer_310.sh [MINDIR_PATH] [DATA_PATH] [DVPP] [DEVICE_ID] +``` + +- `DVPP` 为必填项,需要在["DVPP", "CPU"]选择,大小写均可。CNNCTC目前仅支持使用CPU算子进行推理。 +- `DEVICE_ID` 可选,默认值为0。 + +### 结果 + +推理结果保存在脚本执行的当前路径,你可以在acc.log中看到以下精度计算结果。 + +```bash +'Accuracy':0.8642 +``` + +# 模型描述 + +## 性能 + +### 训练性能 + +| 参数 | CNNCTC | +| -------------------------- | ----------------------------------------------------------- | +| 模型版本 | V1 | +| 资源 | Ascend 910;CPU 2.60GHz,192核;内存:755G | +| 上传日期 | 2020-09-28 | +| MindSpore版本 | 1.0.0 | +| 数据集 | MJSynth、SynthText | +| 训练参数 | epoch=3, batch_size=192 | +| 优化器 | RMSProp | +| 损失函数 | CTCLoss | +| 速度 | 1卡:300毫秒/步;8卡:310毫秒/步 | +| 总时间 | 1卡:18小时;8卡:2.3小时 | +| 参数(M) | 177 | +| 脚本 | | + +### 评估性能 + +| 参数 | CNNCTC | +| ------------------- | --------------------------- | +| 模型版本 | V1 | +| 资源 | Ascend 910 | +| 上传日期 | 2020-09-28 | +| MindSpore版本 | 1.0.0 | +| 数据集 | IIIT5K | +| batch_size | 192 | +| 输出 |准确率 | +| 准确率 | 85% | +| 推理模型 | 675M(.ckpt文件) | + +### 推理性能 + +| 参数 | Ascend | +| -------------- | ---------------------------| +| 模型版本 | CNNCTC | +| 资源 | Ascend 310;系统 CentOS 3.10 | +| 上传日期 | 2021-05-19 | +| MindSpore版本 | 1.2.0 | +| 数据集 | IIIT5K | +| batch_size | 1 | +| 输出 | Accuracy | +| 准确率 | Accuracy=0.8642 | +| 推理模型 | 675M(.ckpt文件) | + +## 用法 + +### 推理 + +如果您需要在GPU、Ascend 910、Ascend 
310等多个硬件平台上使用训练好的模型进行推理,请参考此[链接](https://www.mindspore.cn/docs/programming_guide/zh-CN/master/multi_platform_inference.html)。以下为简单示例:
+
+- Ascend处理器环境运行
+
+  ```python
+  # 设置上下文
+  context.set_context(mode=context.GRAPH_MODE, device_target=cfg.device_target)
+  context.set_context(device_id=cfg.device_id)
+
+  # 加载未知数据集进行推理
+  dataset = create_dataset(cfg.data_path, 1, False)
+
+  # 定义模型
+  net = CNNCTC(cfg.NUM_CLASS, cfg.HIDDEN_SIZE, cfg.FINAL_FEATURE_WIDTH)
+  opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01,
+                 cfg.momentum, weight_decay=cfg.weight_decay)
+  loss = P.CTCLoss(preprocess_collapse_repeated=False,
+                   ctc_merge_repeated=True,
+                   ignore_longer_outputs_than_inputs=False)
+  model = Model(net, loss_fn=loss, optimizer=opt, metrics={'acc'})
+
+  # 加载预训练模型
+  param_dict = load_checkpoint(cfg.checkpoint_path)
+  load_param_into_net(net, param_dict)
+  net.set_train(False)
+
+  # 对未知数据集进行预测
+  acc = model.eval(dataset)
+  print("accuracy: ", acc)
+  ```
+
+### 在预训练模型上继续训练
+
+- Ascend处理器环境运行
+
+  ```python
+  # 加载数据集
+  dataset = create_dataset(cfg.data_path, 1)
+  batch_num = dataset.get_dataset_size()
+
+  # 定义模型
+  net = CNNCTC(cfg.NUM_CLASS, cfg.HIDDEN_SIZE, cfg.FINAL_FEATURE_WIDTH)
+  # 如果pre_trained为True,则继续训练
+  if cfg.pre_trained:
+      param_dict = load_checkpoint(cfg.checkpoint_path)
+      load_param_into_net(net, param_dict)
+  lr = lr_steps(0, lr_max=cfg.lr_init, total_epochs=cfg.epoch_size,
+                steps_per_epoch=batch_num)
+  opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()),
+                 Tensor(lr), cfg.momentum, weight_decay=cfg.weight_decay)
+  loss = P.CTCLoss(preprocess_collapse_repeated=False,
+                   ctc_merge_repeated=True,
+                   ignore_longer_outputs_than_inputs=False)
+  model = Model(net, loss_fn=loss, optimizer=opt, metrics={'acc'},
+                amp_level="O2", keep_batchnorm_fp32=False, loss_scale_manager=None)
+
+  # 设置回调
+  config_ck = CheckpointConfig(save_checkpoint_steps=batch_num * 5,
+                               keep_checkpoint_max=cfg.keep_checkpoint_max)
+  time_cb = TimeMonitor(data_size=batch_num)
+  ckpoint_cb = ModelCheckpoint(prefix="train_cnnctc", directory="./",
+                               config=config_ck)
+  loss_cb = LossMonitor()
+
+  # 开始训练
+  model.train(cfg.epoch_size, dataset, callbacks=[time_cb, ckpoint_cb, loss_cb])
+  print("train success")
+  ```
+
+# ModelZoo主页
+
+ 请浏览官网[主页](https://gitee.com/mindspore/models)。
diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/eval.py b/examples/natural_robustness/ocr_evaluate/cnn_ctc/eval.py
new file mode 100644
index 0000000..d528938
--- /dev/null
+++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/eval.py
@@ -0,0 +1,111 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================ +"""cnnctc eval""" + +import time +import numpy as np +from mindspore import Tensor, context +import mindspore.common.dtype as mstype +from mindspore.train.serialization import load_checkpoint, load_param_into_net +from mindspore.dataset import GeneratorDataset +from src.util import CTCLabelConverter, AverageMeter +from src.dataset import iiit_generator_batch, adv_iiit_generator_batch +from src.cnn_ctc import CNNCTC +from src.model_utils.config import config +from src.model_utils.moxing_adapter import moxing_wrapper + +context.set_context(mode=context.GRAPH_MODE, save_graphs=False, save_graphs_path=".") + + +def test_dataset_creator(is_adv=False): + if is_adv: + ds = GeneratorDataset(adv_iiit_generator_batch(), ['img', 'label_indices', 'text', + 'sequence_length', 'label_str']) + + else: + ds = GeneratorDataset(iiit_generator_batch, ['img', 'label_indices', 'text', + 'sequence_length', 'label_str']) + return ds + + +@moxing_wrapper(pre_process=None) +def test(): + """Eval cnn-ctc model.""" + target = config.device_target + context.set_context(device_target=target) + + ds = test_dataset_creator(is_adv=config.IS_ADV) + + net = CNNCTC(config.NUM_CLASS, config.HIDDEN_SIZE, config.FINAL_FEATURE_WIDTH) + + ckpt_path = config.CHECKPOINT_PATH + param_dict = load_checkpoint(ckpt_path) + load_param_into_net(net, param_dict) + print('parameters loaded! from: ', ckpt_path) + + converter = CTCLabelConverter(config.CHARACTER) + + model_run_time = AverageMeter() + npu_to_cpu_time = AverageMeter() + postprocess_time = AverageMeter() + + count = 0 + correct_count = 0 + for data in ds.create_tuple_iterator(): + img, _, text, _, length = data + + img_tensor = Tensor(img, mstype.float32) + + model_run_begin = time.time() + model_predict = net(img_tensor) + model_run_end = time.time() + model_run_time.update(model_run_end - model_run_begin) + + npu_to_cpu_begin = time.time() + model_predict = np.squeeze(model_predict.asnumpy()) + npu_to_cpu_end = time.time() + npu_to_cpu_time.update(npu_to_cpu_end - npu_to_cpu_begin) + + postprocess_begin = time.time() + preds_size = np.array([model_predict.shape[1]] * config.TEST_BATCH_SIZE) + preds_index = np.argmax(model_predict, 2) + preds_index = np.reshape(preds_index, [-1]) + preds_str = converter.decode(preds_index, preds_size) + postprocess_end = time.time() + postprocess_time.update(postprocess_end - postprocess_begin) + + label_str = converter.reverse_encode(text.asnumpy(), length.asnumpy()) + + if count == 0: + model_run_time.reset() + npu_to_cpu_time.reset() + postprocess_time.reset() + else: + print('---------model run time--------', model_run_time.avg) + print('---------npu_to_cpu run time--------', npu_to_cpu_time.avg) + print('---------postprocess run time--------', postprocess_time.avg) + + print("Prediction samples: \n", preds_str[:5]) + print("Ground truth: \n", label_str[:5]) + for pred, label in zip(preds_str, label_str): + if pred == label: + correct_count += 1 + count += 1 + print(count) + print('accuracy: ', correct_count / count) + + +if __name__ == '__main__': + test() diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/export.py b/examples/natural_robustness/ocr_evaluate/cnn_ctc/export.py new file mode 100644 index 0000000..065f4d1 --- /dev/null +++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/export.py @@ -0,0 +1,51 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use 
this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""export checkpoint file into air, onnx, mindir models + suggest run as python export.py --filename cnnctc --file_format MINDIR --ckpt_file [ckpt file path] +""" +import os +import numpy as np +from mindspore import Tensor, context, load_checkpoint, export +import mindspore.common.dtype as mstype +from src.cnn_ctc import CNNCTC +from src.model_utils.config import config +from src.model_utils.moxing_adapter import moxing_wrapper + + +context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target) +if config.device_target == "Ascend": + context.set_context(device_id=config.device_id) + + +def modelarts_pre_process(): + config.file_name = os.path.join(config.output_path, config.file_name) + + +@moxing_wrapper(pre_process=modelarts_pre_process) +def model_export(): + """Export model.""" + net = CNNCTC(config.NUM_CLASS, config.HIDDEN_SIZE, config.FINAL_FEATURE_WIDTH) + + load_checkpoint(config.ckpt_file, net=net) + + bs = config.TEST_BATCH_SIZE + + input_data = Tensor(np.zeros([bs, 3, config.IMG_H, config.IMG_W]), mstype.float32) + + export(net, input_data, file_name=config.file_name, file_format=config.file_format) + + +if __name__ == '__main__': + model_export() diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/mindspore_hub_conf.py b/examples/natural_robustness/ocr_evaluate/cnn_ctc/mindspore_hub_conf.py new file mode 100644 index 0000000..2fc1a4a --- /dev/null +++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/mindspore_hub_conf.py @@ -0,0 +1,30 @@ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +"""hub config""" +from src.cnn_ctc import CNNCTC +from src.config import Config_CNNCTC + +def cnnctc_net(*args, **kwargs): + return CNNCTC(*args, **kwargs) + + +def create_network(name, *args, **kwargs): + """ + create cnnctc network + """ + if name == "cnnctc": + config = Config_CNNCTC + return cnnctc_net(config.NUM_CLASS, config.HIDDEN_SIZE, config.FINAL_FEATURE_WIDTH, *args, **kwargs) + raise NotImplementedError(f"{name} is not implemented in the repo") diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/postprocess.py b/examples/natural_robustness/ocr_evaluate/cnn_ctc/postprocess.py new file mode 100644 index 0000000..29417ce --- /dev/null +++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/postprocess.py @@ -0,0 +1,54 @@ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""post process for 310 inference""" +import os +import numpy as np +from src.model_utils.config import config +from src.util import CTCLabelConverter + + +def calcul_acc(labels, preds): + return sum(1 for x, y in zip(labels, preds) if x == y) / len(labels) + + +def get_result(result_path, label_path): + """Get result.""" + converter = CTCLabelConverter(config.CHARACTER) + files = os.listdir(result_path) + preds = [] + labels = [] + label_dict = {} + with open(label_path, 'r') as f: + lines = f.readlines() + for line in lines: + label_dict[line.split(',')[0]] = line.split(',')[1].replace('\n', '') + for file in files: + file_name = file.split('.')[0] + label = label_dict[file_name] + labels.append(label) + new_result_path = os.path.join(result_path, file) + output = np.fromfile(new_result_path, dtype=np.float32) + output = np.reshape(output, (config.FINAL_FEATURE_WIDTH, config.NUM_CLASS)) + model_predict = np.squeeze(output) + preds_size = np.array([model_predict.shape[0]] * 1) + preds_index = np.argmax(model_predict, axis=1) + preds_str = converter.decode(preds_index, preds_size) + preds.append(preds_str[0]) + acc = calcul_acc(labels, preds) + print("Total data: {}, accuracy: {}".format(len(labels), acc)) + + +if __name__ == '__main__': + get_result(config.result_path, config.label_path) diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/preprocess.py b/examples/natural_robustness/ocr_evaluate/cnn_ctc/preprocess.py new file mode 100644 index 0000000..4abdeea --- /dev/null +++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/preprocess.py @@ -0,0 +1,96 @@ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""pre process for 310 inference"""
+import os
+import sys
+import six
+import lmdb
+from PIL import Image
+from src.model_utils.config import config
+from src.util import CTCLabelConverter
+
+
+def get_img_from_lmdb(env_, ind):
+    """Get image from lmdb."""
+    with env_.begin(write=False) as txn_:
+        label_key = 'label-%09d'.encode() % ind
+        label_ = txn_.get(label_key).decode('utf-8')
+        img_key = 'image-%09d'.encode() % ind
+        imgbuf = txn_.get(img_key)
+
+    buf = six.BytesIO()
+    buf.write(imgbuf)
+    buf.seek(0)
+    try:
+        img_ = Image.open(buf).convert('RGB')  # for color image
+
+    except IOError:
+        print(f'Corrupted image for {ind}')
+        # make dummy image and dummy label for corrupted image.
+        img_ = Image.new('RGB', (config.IMG_W, config.IMG_H))
+        label_ = '[dummy_label]'
+
+    label_ = label_.lower()
+
+    return img_, label_
+
+
+if __name__ == '__main__':
+    max_len = int((26 + 1) // 2)
+    converter = CTCLabelConverter(config.CHARACTER)
+    env = lmdb.open(config.TEST_DATASET_PATH, max_readers=32, readonly=True, lock=False, readahead=False, meminit=False)
+    if not env:
+        print('cannot create lmdb from %s' % (config.TEST_DATASET_PATH))
+        sys.exit(0)
+
+    with env.begin(write=False) as txn:
+        n_samples = int(txn.get('num-samples'.encode()))
+
+        # Filtering
+        filtered_index_list = []
+        for index_ in range(n_samples):
+            index_ += 1  # lmdb starts with 1
+            label_key_ = 'label-%09d'.encode() % index_
+            label = txn.get(label_key_).decode('utf-8')
+
+            if len(label) > max_len:
+                continue
+
+            illegal_sample = False
+            for char_item in label.lower():
+                if char_item not in config.CHARACTER:
+                    illegal_sample = True
+                    break
+            if illegal_sample:
+                continue
+
+            filtered_index_list.append(index_)
+
+        img_ret = []
+        text_ret = []
+
+        print(f'num of samples in IIIT dataset: {len(filtered_index_list)}')
+        i = 0
+        label_dict = {}
+        for index in filtered_index_list:
+            img, label = get_img_from_lmdb(env, index)
+            img_name = os.path.join(config.preprocess_output, str(i) + ".png")
+            img.save(img_name)
+            label_dict[str(i)] = label
+            i += 1
+        with open('./label.txt', 'w') as file:
+            for k, v in label_dict.items():
+                file.write(str(k) + ',' + str(v) + '\n')
diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/requirements.txt b/examples/natural_robustness/ocr_evaluate/cnn_ctc/requirements.txt
new file mode 100644
index 0000000..2830093
--- /dev/null
+++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/requirements.txt
@@ -0,0 +1,7 @@
+lmdb
+tqdm
+six
+numpy
+pillow
+pyyaml
+
diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/scripts/run_eval_ascend.sh b/examples/natural_robustness/ocr_evaluate/cnn_ctc/scripts/run_eval_ascend.sh
new file mode 100644
index 0000000..1572c50
--- /dev/null
+++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/scripts/run_eval_ascend.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +if [ $# -ne 2 ] +then + echo "Usage: sh scripts/run_eval_ascend.sh [DEVICE_ID] [TRAINED_CKPT]" +exit 1 +fi + +get_real_path(){ + if [ "${1:0:1}" == "/" ]; then + echo "$1" + else + echo "$(realpath -m $PWD/$1)" + fi +} + +PATH1=$(get_real_path $2) +echo $PATH1 +if [ ! -f $PATH1 ] +then + echo "error: TRAINED_CKPT=$PATH1 is not a file" +exit 1 +fi + +ulimit -u unlimited +export DEVICE_ID=$1 + +if [ -d "eval" ]; +then + rm -rf ./eval +fi +mkdir ./eval +echo "start inferring for device $DEVICE_ID" +env > ./eval/env.log +python eval.py --CHECKPOINT_PATH=$PATH1 &> ./eval/log & +#cd .. || exit diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/scripts/run_eval_gpu.sh b/examples/natural_robustness/ocr_evaluate/cnn_ctc/scripts/run_eval_gpu.sh new file mode 100644 index 0000000..cb6ff79 --- /dev/null +++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/scripts/run_eval_gpu.sh @@ -0,0 +1,50 @@ +#!/bin/bash +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +if [ $# -ne 1 ] +then + echo "Usage: sh run_eval_gpu.sh [TRAINED_CKPT]" +exit 1 +fi + +get_real_path(){ + if [ "${1:0:1}" == "/" ]; then + echo "$1" + else + echo "$(realpath -m $PWD/$1)" + fi +} + +PATH1=$(get_real_path $1) +echo $PATH1 +if [ ! -f $PATH1 ] +then + echo "error: TRAINED_CKPT=$PATH1 is not a file" +exit 1 +fi + +#ulimit -u unlimited +export DEVICE_ID=0 + +if [ -d "eval" ]; +then + rm -rf ./eval +fi +mkdir ./eval +echo "start inferring for device $DEVICE_ID" +env > ./eval/env.log +python eval.py --device_target="GPU" --device_id=$DEVICE_ID --CHECKPOINT_PATH=$PATH1 &> ./eval/log & +#cd .. || exit diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/scripts/run_standalone_train_ascend.sh b/examples/natural_robustness/ocr_evaluate/cnn_ctc/scripts/run_standalone_train_ascend.sh new file mode 100644 index 0000000..f9197f5 --- /dev/null +++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/scripts/run_standalone_train_ascend.sh @@ -0,0 +1,49 @@ +#!/bin/bash +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+if [ $# != 1 ] && [ $# != 2 ]
+then
+    echo "Usage: sh scripts/run_standalone_train_ascend.sh [DEVICE_ID] [PRE_TRAINED](optional)"
+exit 1
+fi
+
+get_real_path(){
+  if [ "${1:0:1}" == "/" ]; then
+    echo "$1"
+  else
+    echo "$(realpath -m $PWD/$1)"
+  fi
+}
+PATH1=$(get_real_path $2)
+
+export DEVICE_ID=$1
+
+ulimit -u unlimited
+
+if [ -d "train" ];
+then
+    rm -rf ./train
+fi
+mkdir ./train
+echo "start training for device $DEVICE_ID"
+env > env.log
+if [ -f $PATH1 ]
+then
+    python train.py --PRED_TRAINED=$PATH1 --run_distribute=False &> log &
+else
+    python train.py --run_distribute=False &> log &
+fi
+cd .. || exit
diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/scripts/run_standalone_train_gpu.sh b/examples/natural_robustness/ocr_evaluate/cnn_ctc/scripts/run_standalone_train_gpu.sh
new file mode 100644
index 0000000..c11410b
--- /dev/null
+++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/scripts/run_standalone_train_gpu.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+get_real_path(){
+  if [ "${1:0:1}" == "/" ]; then
+    echo "$1"
+  else
+    echo "$(realpath -m $PWD/$1)"
+  fi
+}
+PATH1=$(get_real_path $1)
+echo $PATH1
+
+export DEVICE_NUM=1
+export RANK_SIZE=1
+
+if [ -d "train" ];
+then
+    rm -rf ./train
+fi
+mkdir ./train
+env > ./train/env.log
+if [ -f $PATH1 ]
+then
+    python train.py --device_target="GPU" --PRED_TRAINED=$PATH1 --run_distribute=False &> log &
+else
+    python train.py --device_target="GPU" --run_distribute=False &> ./train/log &
+fi
+#cd .. || exit
diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/__init__.py b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/__init__.py
new file mode 100644
index 0000000..8d62ac3
--- /dev/null
+++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""src init file"""
diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/callback.py b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/callback.py
new file mode 100644
index 0000000..9b048d1
--- /dev/null
+++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/callback.py
@@ -0,0 +1,73 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""loss callback"""
+
+import time
+import numpy as np
+from mindspore.train.callback import Callback
+from .util import AverageMeter

+class LossCallBack(Callback):
+    """
+    Monitor the loss during training and log it to ./loss.log.
+
+    Note:
+        If per_print_times is 0, the loss is not printed.
+
+    Args:
+        per_print_times (int): Print the loss every `per_print_times` steps. Default: 1.
+    """
+
+    def __init__(self, per_print_times=1):
+        super(LossCallBack, self).__init__()
+        if not isinstance(per_print_times, int) or per_print_times < 0:
+            raise ValueError("per_print_times must be int and >= 0.")
+        self._per_print_times = per_print_times
+        self.loss_avg = AverageMeter()
+        self.timer = AverageMeter()
+        self.start_time = time.time()
+
+    def step_end(self, run_context):
+        """step end."""
+        cb_params = run_context.original_args()
+
+        loss = np.array(cb_params.net_outputs)
+
+        cur_step_in_epoch = (cb_params.cur_step_num - 1) % cb_params.batch_num + 1
+        cur_num = cb_params.cur_step_num
+
+        if cur_step_in_epoch % 2000 == 1:
+            self.loss_avg = AverageMeter()
+            self.timer = AverageMeter()
+            self.start_time = time.time()
+        else:
+            self.timer.update(time.time() - self.start_time)
+            self.start_time = time.time()
+
+        self.loss_avg.update(loss)
+
+        if self._per_print_times != 0 and cur_num % self._per_print_times == 0:
+            loss_file = open("./loss.log", "a+")
+            loss_file.write("epoch: %s step: %s , loss is %s, average time per step is %s" % (
+                cb_params.cur_epoch_num, cur_step_in_epoch,
+                self.loss_avg.avg, self.timer.avg))
+            loss_file.write("\n")
+            loss_file.close()
+
+            print("epoch: %s step: %s , loss is %s, average time per step is %s" % (
+                cb_params.cur_epoch_num, cur_step_in_epoch,
+                self.loss_avg.avg, self.timer.avg))
diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/cnn_ctc.py b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/cnn_ctc.py
new file mode 100644
index 0000000..2d1ea28
--- /dev/null
+++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/cnn_ctc.py
@@ -0,0 +1,389 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""cnn_ctc network define""" + +import mindspore.common.dtype as mstype +import mindspore.nn as nn +from mindspore import Tensor, Parameter, ParameterTuple, context +from mindspore.common.initializer import TruncatedNormal, initializer +from mindspore.communication.management import get_group_size +from mindspore.context import ParallelMode +from mindspore.nn.wrap.grad_reducer import DistributedGradReducer +from mindspore.ops import composite as C +from mindspore.ops import functional as F +from mindspore.ops import operations as P + +grad_scale = C.MultitypeFuncGraph("grad_scale") +reciprocal = P.Reciprocal() + + +@grad_scale.register("Tensor", "Tensor") +def tensor_grad_scale(scale, grad): + return grad * F.cast(reciprocal(scale), F.dtype(grad)) + + +_grad_overflow = C.MultitypeFuncGraph("_grad_overflow") +grad_overflow = P.FloatStatus() + + +@_grad_overflow.register("Tensor") +def _tensor_grad_overflow(grad): + return grad_overflow(grad) + + +GRADIENT_CLIP_MIN = -64000 +GRADIENT_CLIP_MAX = 64000 + + +class ClipGradients(nn.Cell): + """ + Clip large gradients, typically generated from overflow. + """ + + def __init__(self): + super(ClipGradients, self).__init__() + self.clip_by_norm = nn.ClipByNorm() + self.cast = P.Cast() + self.dtype = P.DType() + + def construct(self, grads, clip_min, clip_max): + new_grads = () + for grad in grads: + dt = self.dtype(grad) + + t = C.clip_by_value(grad, self.cast(F.tuple_to_array((clip_min,)), dt), + self.cast(F.tuple_to_array((clip_max,)), dt)) + t = self.cast(t, dt) + new_grads = new_grads + (t,) + return new_grads + + +class CNNCTCTrainOneStepWithLossScaleCell(nn.Cell): + """ + Encapsulation class of CNNCTC network training. + Used for GPU training in order to manage overflowing gradients. + Args: + network (Cell): The training network. Note that loss function should have been added. + optimizer (Optimizer): Optimizer for updating the weights. + scale_sense (Cell): Loss scaling value. 
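+
+    Note:
+        As handled in __init__ below, scale_sense may be either a loss-scale
+        manager Cell (its get_loss_scale() value is wrapped into a Parameter)
+        or a scalar Tensor of shape (1,) or () holding a fixed loss scale.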
+ """ + + def __init__(self, network, optimizer, scale_sense): + super(CNNCTCTrainOneStepWithLossScaleCell, self).__init__(auto_prefix=False) + self.network = network + self.optimizer = optimizer + + if isinstance(scale_sense, nn.Cell): + self.loss_scaling_manager = scale_sense + self.scale_sense = Parameter(Tensor(scale_sense.get_loss_scale(), + dtype=mstype.float32), name="scale_sense") + elif isinstance(scale_sense, Tensor): + if scale_sense.shape == (1,) or scale_sense.shape == (): + self.scale_sense = Parameter(scale_sense, name='scale_sense') + else: + raise ValueError("The shape of scale_sense must be (1,) or (), but got {}".format( + scale_sense.shape)) + else: + raise TypeError("The scale_sense must be Cell or Tensor, but got {}".format( + type(scale_sense))) + + self.network.set_grad() + self.weights = ParameterTuple(network.trainable_params()) + + self.grad = C.GradOperation(get_by_list=True, + sens_param=True) + + self.reducer_flag = False + self.parallel_mode = context.get_auto_parallel_context("parallel_mode") + if self.parallel_mode not in ParallelMode.MODE_LIST: + raise ValueError("Parallel mode does not support: ", self.parallel_mode) + if self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]: + self.reducer_flag = True + self.grad_reducer = None + if self.reducer_flag: + mean = context.get_auto_parallel_context("gradients_mean") + degree = get_group_size() + self.grad_reducer = DistributedGradReducer(optimizer.parameters, mean, degree) + self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE) + + self.clip_gradients = ClipGradients() + self.cast = P.Cast() + self.addn = P.AddN() + self.reshape = P.Reshape() + self.hyper_map = C.HyperMap() + self.less_equal = P.LessEqual() + self.allreduce = P.AllReduce() + + def construct(self, img, label_indices, text, sequence_length): + """model construct.""" + weights = self.weights + loss = self.network(img, label_indices, text, sequence_length) + + scaling_sens = self.scale_sense + + grads = self.grad(self.network, weights)(img, label_indices, text, sequence_length, + self.cast(scaling_sens, mstype.float32)) + + grads = self.hyper_map(F.partial(grad_scale, scaling_sens), grads) + grads = self.clip_gradients(grads, GRADIENT_CLIP_MIN, GRADIENT_CLIP_MAX) + + if self.reducer_flag: + # apply grad reducer on grads + grads = self.grad_reducer(grads) + + self.optimizer(grads) + return (loss, scaling_sens) + + +class CNNCTC(nn.Cell): + """CNNCTC model construct.""" + def __init__(self, num_class, hidden_size, final_feature_width): + super(CNNCTC, self).__init__() + + self.num_class = num_class + self.hidden_size = hidden_size + self.final_feature_width = final_feature_width + + self.feature_extraction = ResNetFeatureExtractor() + self.prediction = nn.Dense(self.hidden_size, self.num_class) + + self.transpose = P.Transpose() + self.reshape = P.Reshape() + + def construct(self, x): + x = self.feature_extraction(x) + x = self.transpose(x, (0, 3, 1, 2)) # [b, c, h, w] -> [b, w, c, h] + + x = self.reshape(x, (-1, self.hidden_size)) + x = self.prediction(x) + x = self.reshape(x, (-1, self.final_feature_width, self.num_class)) + + return x + + +class WithLossCell(nn.Cell): + """Add loss cell for network.""" + def __init__(self, backbone, loss_fn): + super(WithLossCell, self).__init__(auto_prefix=False) + self._backbone = backbone + self._loss_fn = loss_fn + + def construct(self, img, label_indices, text, sequence_length): + model_predict = self._backbone(img) + return self._loss_fn(model_predict, 
label_indices, text, sequence_length) + + @property + def backbone_network(self): + return self._backbone + + +class CTCLoss(nn.Cell): + """Loss of CTC.""" + def __init__(self): + super(CTCLoss, self).__init__() + + self.loss = P.CTCLoss(preprocess_collapse_repeated=False, + ctc_merge_repeated=True, + ignore_longer_outputs_than_inputs=False) + + self.mean = P.ReduceMean() + self.transpose = P.Transpose() + self.reshape = P.Reshape() + + def construct(self, inputs, labels_indices, labels_values, sequence_length): + inputs = self.transpose(inputs, (1, 0, 2)) + + loss, _ = self.loss(inputs, labels_indices, labels_values, sequence_length) + + loss = self.mean(loss) + return loss + + +class ResNetFeatureExtractor(nn.Cell): + """Extractor of ResNet feature.""" + def __init__(self): + super(ResNetFeatureExtractor, self).__init__() + self.conv_net = ResNet(3, 512, BasicBlock, [1, 2, 5, 3]) + + def construct(self, feature_map): + return self.conv_net(feature_map) + + +class ResNet(nn.Cell): + """Network of ResNet.""" + def __init__(self, input_channel, output_channel, block, layers): + super(ResNet, self).__init__() + + self.output_channel_block = [int(output_channel / 4), int(output_channel / 2), output_channel, output_channel] + + self.inplanes = int(output_channel / 8) + self.conv0_1 = ms_conv3x3(input_channel, int(output_channel / 16), stride=1, padding=1, pad_mode='pad') + self.bn0_1 = ms_fused_bn(int(output_channel / 16)) + self.conv0_2 = ms_conv3x3(int(output_channel / 16), self.inplanes, stride=1, padding=1, pad_mode='pad') + self.bn0_2 = ms_fused_bn(self.inplanes) + self.relu = P.ReLU() + + self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2, pad_mode='valid') + self.layer1 = self._make_layer(block, self.output_channel_block[0], layers[0]) + self.conv1 = ms_conv3x3(self.output_channel_block[0], self.output_channel_block[0], stride=1, padding=1, + pad_mode='pad') + self.bn1 = ms_fused_bn(self.output_channel_block[0]) + + self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2, pad_mode='valid') + self.layer2 = self._make_layer(block, self.output_channel_block[1], layers[1]) + self.conv2 = ms_conv3x3(self.output_channel_block[1], self.output_channel_block[1], stride=1, padding=1, + pad_mode='pad') + self.bn2 = ms_fused_bn(self.output_channel_block[1]) + + self.pad = P.Pad(((0, 0), (0, 0), (0, 0), (2, 2))) + self.maxpool3 = nn.MaxPool2d(kernel_size=2, stride=(2, 1), pad_mode='valid') + self.layer3 = self._make_layer(block, self.output_channel_block[2], layers[2]) + self.conv3 = ms_conv3x3(self.output_channel_block[2], self.output_channel_block[2], stride=1, padding=1, + pad_mode='pad') + self.bn3 = ms_fused_bn(self.output_channel_block[2]) + + self.layer4 = self._make_layer(block, self.output_channel_block[3], layers[3]) + self.conv4_1 = ms_conv2x2(self.output_channel_block[3], self.output_channel_block[3], stride=(2, 1), + pad_mode='valid') + self.bn4_1 = ms_fused_bn(self.output_channel_block[3]) + + self.conv4_2 = ms_conv2x2(self.output_channel_block[3], self.output_channel_block[3], stride=1, padding=0, + pad_mode='valid') + self.bn4_2 = ms_fused_bn(self.output_channel_block[3]) + + def _make_layer(self, block, planes, blocks, stride=1): + """make layer""" + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.SequentialCell( + [ms_conv1x1(self.inplanes, planes * block.expansion, stride=stride), + ms_fused_bn(planes * block.expansion)] + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample)) + self.inplanes = planes * 
block.expansion
+        for _ in range(1, blocks):
+            layers.append(block(self.inplanes, planes))
+
+        return nn.SequentialCell(layers)
+
+    def construct(self, x):
+        """model construct"""
+        x = self.conv0_1(x)
+        x = self.bn0_1(x)
+        x = self.relu(x)
+        x = self.conv0_2(x)
+        x = self.bn0_2(x)
+        x = self.relu(x)
+
+        x = self.maxpool1(x)
+        x = self.layer1(x)
+        x = self.conv1(x)
+        x = self.bn1(x)
+        x = self.relu(x)
+
+        x = self.maxpool2(x)
+        x = self.layer2(x)
+        x = self.conv2(x)
+        x = self.bn2(x)
+        x = self.relu(x)
+
+        x = self.maxpool3(x)
+        x = self.layer3(x)
+        x = self.conv3(x)
+        x = self.bn3(x)
+        x = self.relu(x)
+
+        x = self.layer4(x)
+        x = self.pad(x)
+        x = self.conv4_1(x)
+        x = self.bn4_1(x)
+        x = self.relu(x)
+        x = self.conv4_2(x)
+        x = self.bn4_2(x)
+        x = self.relu(x)
+
+        return x
+
+
+class BasicBlock(nn.Cell):
+    """BasicBlock"""
+    expansion = 1
+
+    def __init__(self, inplanes, planes, stride=1, downsample=None):
+        super(BasicBlock, self).__init__()
+
+        self.conv1 = ms_conv3x3(inplanes, planes, stride=stride, padding=1, pad_mode='pad')
+        self.bn1 = ms_fused_bn(planes)
+        self.conv2 = ms_conv3x3(planes, planes, stride=stride, padding=1, pad_mode='pad')
+        self.bn2 = ms_fused_bn(planes)
+        self.relu = P.ReLU()
+        self.downsample = downsample
+        self.add = P.Add()
+
+    def construct(self, x):
+        """Basic block construct"""
+        residual = x
+
+        out = self.conv1(x)
+        out = self.bn1(out)
+        out = self.relu(out)
+
+        out = self.conv2(out)
+        out = self.bn2(out)
+
+        if self.downsample is not None:
+            residual = self.downsample(x)
+        out = self.add(out, residual)
+        out = self.relu(out)
+
+        return out
+
+
+def weight_variable(shape, half_precision=False):
+    if half_precision:
+        return initializer(TruncatedNormal(0.02), shape, dtype=mstype.float16)
+
+    return TruncatedNormal(0.02)
+
+
+def ms_conv3x3(in_channels, out_channels, stride=1, padding=0, pad_mode='same', has_bias=False):
+    """Get a conv2d layer with 3x3 kernel size."""
+    init_value = weight_variable((out_channels, in_channels, 3, 3))
+    return nn.Conv2d(in_channels, out_channels,
+                     kernel_size=3, stride=stride, padding=padding, pad_mode=pad_mode, weight_init=init_value,
+                     has_bias=has_bias)
+
+
+def ms_conv1x1(in_channels, out_channels, stride=1, padding=0, pad_mode='same', has_bias=False):
+    """Get a conv2d layer with 1x1 kernel size."""
+    init_value = weight_variable((out_channels, in_channels, 1, 1))
+    return nn.Conv2d(in_channels, out_channels,
+                     kernel_size=1, stride=stride, padding=padding, pad_mode=pad_mode, weight_init=init_value,
+                     has_bias=has_bias)
+
+
+def ms_conv2x2(in_channels, out_channels, stride=1, padding=0, pad_mode='same', has_bias=False):
+    """Get a conv2d layer with 2x2 kernel size."""
+    init_value = weight_variable((out_channels, in_channels, 2, 2))
+    return nn.Conv2d(in_channels, out_channels,
+                     kernel_size=2, stride=stride, padding=padding, pad_mode=pad_mode, weight_init=init_value,
+                     has_bias=has_bias)
+
+
+def ms_fused_bn(channels, momentum=0.1):
+    """Get a fused batchnorm"""
+    return nn.BatchNorm2d(channels, momentum=momentum)
diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/dataset.py b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/dataset.py
new file mode 100644
index 0000000..3d78c89
--- /dev/null
+++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/dataset.py
@@ -0,0 +1,343 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""cnn_ctc dataset"""
+
+import sys
+import pickle
+import math
+import six
+import numpy as np
+from PIL import Image
+import lmdb
+from mindspore.communication.management import get_rank, get_group_size
+from src.model_utils.config import config
+from src.util import CTCLabelConverter
+
+
+class NormalizePAD:
+    """Normalize pad."""
+
+    def __init__(self, max_size, pad_type='right'):
+        self.max_size = max_size
+        self.pad_type = pad_type
+
+    def __call__(self, img):
+        # toTensor
+        img = np.array(img, dtype=np.float32)
+        # normalize
+        means = [121.58949, 123.93914, 123.418655]
+        stds = [65.70353, 65.142426, 68.61079]
+        img = np.subtract(img, means)
+        img = np.true_divide(img, stds)
+
+        img = img.transpose([2, 0, 1])
+        img = img.astype(np.float32)
+
+        _, _, w = img.shape
+        pad_img = np.zeros(shape=self.max_size, dtype=np.float32)
+        pad_img[:, :, :w] = img  # right pad
+        if self.max_size[2] != w:  # add border Pad
+            pad_img[:, :, w:] = np.tile(np.expand_dims(img[:, :, w - 1], 2), (1, 1, self.max_size[2] - w))
+
+        return pad_img
+
+
+class AlignCollate:
+    """Align collate"""
+
+    def __init__(self, img_h=32, img_w=100):
+        self.img_h = img_h
+        self.img_w = img_w
+
+    def __call__(self, images):
+
+        resized_max_w = self.img_w
+        input_channel = 3
+        transform = NormalizePAD((input_channel, self.img_h, resized_max_w))
+
+        resized_images = []
+        for image in images:
+            w, h = image.size
+            ratio = w / float(h)
+            if math.ceil(self.img_h * ratio) > self.img_w:
+                resized_w = self.img_w
+            else:
+                resized_w = math.ceil(self.img_h * ratio)
+
+            resized_image = image.resize((resized_w, self.img_h), Image.BICUBIC)
+            resized_images.append(transform(resized_image))
+
+        image_tensors = np.concatenate([np.expand_dims(t, 0) for t in resized_images], 0)
+
+        return image_tensors
+
+
+def get_img_from_lmdb(env, index, is_adv=False):
+    """get image from lmdb."""
+    with env.begin(write=False) as txn:
+        label_key = 'label-%09d'.encode() % index
+        label = txn.get(label_key).decode('utf-8')
+        if is_adv:
+            img_key = 'adv_image-%09d'.encode() % index
+        else:
+            img_key = 'image-%09d'.encode() % index
+        imgbuf = txn.get(img_key)
+
+    buf = six.BytesIO()
+    buf.write(imgbuf)
+    buf.seek(0)
+    try:
+        img = Image.open(buf).convert('RGB')  # for color image
+
+    except IOError:
+        print(f'Corrupted image for {index}')
+        # make dummy image and dummy label for corrupted image.
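+        # A blank config.IMG_W x config.IMG_H image is substituted so that the
+        # batch keeps a consistent shape even when an lmdb record is unreadable.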
+ img = Image.new('RGB', (config.IMG_W, config.IMG_H)) + label = '[dummy_label]' + + label = label.lower() + + return img, label + + +class STMJGeneratorBatchFixedLength: + """ST_MJ Generator with Batch Fixed Length""" + + def __init__(self): + self.align_collector = AlignCollate() + self.converter = CTCLabelConverter(config.CHARACTER) + self.env = lmdb.open(config.TRAIN_DATASET_PATH, max_readers=32, readonly=True, lock=False, readahead=False, + meminit=False) + if not self.env: + print('cannot create lmdb from %s' % (config.TRAIN_DATASET_PATH)) + raise ValueError(config.TRAIN_DATASET_PATH) + + with open(config.TRAIN_DATASET_INDEX_PATH, 'rb') as f: + self.st_mj_filtered_index_list = pickle.load(f) + + print(f'num of samples in ST_MJ dataset: {len(self.st_mj_filtered_index_list)}') + self.dataset_size = len(self.st_mj_filtered_index_list) // config.TRAIN_BATCH_SIZE + self.batch_size = config.TRAIN_BATCH_SIZE + + def __len__(self): + return self.dataset_size + + def __getitem__(self, item): + img_ret = [] + text_ret = [] + + for i in range(item * self.batch_size, (item + 1) * self.batch_size): + index = self.st_mj_filtered_index_list[i] + img, label = get_img_from_lmdb(self.env, index) + + img_ret.append(img) + text_ret.append(label) + + img_ret = self.align_collector(img_ret) + text_ret, length = self.converter.encode(text_ret) + + label_indices = [] + for i, _ in enumerate(length): + for j in range(length[i]): + label_indices.append((i, j)) + label_indices = np.array(label_indices, np.int64) + sequence_length = np.array([config.FINAL_FEATURE_WIDTH] * config.TRAIN_BATCH_SIZE, dtype=np.int32) + text_ret = text_ret.astype(np.int32) + + return img_ret, label_indices, text_ret, sequence_length + + +class STMJGeneratorBatchFixedLengthPara: + """ST_MJ Generator with batch fixed length Para""" + + def __init__(self): + self.align_collector = AlignCollate() + self.converter = CTCLabelConverter(config.CHARACTER) + self.env = lmdb.open(config.TRAIN_DATASET_PATH, max_readers=32, readonly=True, lock=False, readahead=False, + meminit=False) + if not self.env: + print('cannot create lmdb from %s' % (config.TRAIN_DATASET_PATH)) + raise ValueError(config.TRAIN_DATASET_PATH) + + with open(config.TRAIN_DATASET_INDEX_PATH, 'rb') as f: + self.st_mj_filtered_index_list = pickle.load(f) + + print(f'num of samples in ST_MJ dataset: {len(self.st_mj_filtered_index_list)}') + self.rank_id = get_rank() + self.rank_size = get_group_size() + self.dataset_size = len(self.st_mj_filtered_index_list) // config.TRAIN_BATCH_SIZE // self.rank_size + self.batch_size = config.TRAIN_BATCH_SIZE + + def __len__(self): + return self.dataset_size + + def __getitem__(self, item): + img_ret = [] + text_ret = [] + + rank_item = (item * self.rank_size) + self.rank_id + for i in range(rank_item * self.batch_size, (rank_item + 1) * self.batch_size): + index = self.st_mj_filtered_index_list[i] + img, label = get_img_from_lmdb(self.env, index) + + img_ret.append(img) + text_ret.append(label) + + img_ret = self.align_collector(img_ret) + text_ret, length = self.converter.encode(text_ret) + + label_indices = [] + for i, _ in enumerate(length): + for j in range(length[i]): + label_indices.append((i, j)) + label_indices = np.array(label_indices, np.int64) + sequence_length = np.array([config.FINAL_FEATURE_WIDTH] * config.TRAIN_BATCH_SIZE, dtype=np.int32) + text_ret = text_ret.astype(np.int32) + + return img_ret, label_indices, text_ret, sequence_length + + +def iiit_generator_batch(): + """IIIT dataset generator""" + max_len = int((26 + 1) // 
2)
+
+    align_collector = AlignCollate()
+
+    converter = CTCLabelConverter(config.CHARACTER)
+
+    env = lmdb.open(config.TEST_DATASET_PATH, max_readers=32, readonly=True, lock=False, readahead=False, meminit=False)
+    if not env:
+        print('cannot create lmdb from %s' % (config.TEST_DATASET_PATH))
+        sys.exit(0)
+
+    with env.begin(write=False) as txn:
+        n_samples = int(txn.get('num-samples'.encode()))
+
+        # Filtering
+        filtered_index_list = []
+        for index in range(n_samples):
+            index += 1  # lmdb starts with 1
+            label_key = 'label-%09d'.encode() % index
+            label = txn.get(label_key).decode('utf-8')
+
+            if len(label) > max_len:
+                continue
+
+            illegal_sample = False
+            for char_item in label.lower():
+                if char_item not in config.CHARACTER:
+                    illegal_sample = True
+                    break
+            if illegal_sample:
+                continue
+
+            filtered_index_list.append(index)
+
+        img_ret = []
+        text_ret = []
+
+        print(f'num of samples in IIIT dataset: {len(filtered_index_list)}')
+
+        for index in filtered_index_list:
+            img, label = get_img_from_lmdb(env, index, config.IS_ADV)
+
+            img_ret.append(img)
+            text_ret.append(label)
+
+            if len(img_ret) == config.TEST_BATCH_SIZE:
+                img_ret = align_collector(img_ret)
+                text_ret, length = converter.encode(text_ret)
+
+                label_indices = []
+                for i, _ in enumerate(length):
+                    for j in range(length[i]):
+                        label_indices.append((i, j))
+                label_indices = np.array(label_indices, np.int64)
+                sequence_length = np.array([26] * config.TEST_BATCH_SIZE, dtype=np.int32)
+                text_ret = text_ret.astype(np.int32)
+
+                yield img_ret, label_indices, text_ret, sequence_length, length
+
+                img_ret = []
+                text_ret = []
+
+
+def adv_iiit_generator_batch():
+    """Perturbed IIIT dataset generator."""
+    max_len = int((26 + 1) // 2)
+
+    align_collector = AlignCollate()
+
+    converter = CTCLabelConverter(config.CHARACTER)
+
+    env = lmdb.open(config.ADV_TEST_DATASET_PATH, max_readers=32, readonly=True, lock=False, readahead=False,
+                    meminit=False)
+    if not env:
+        print('cannot create lmdb from %s' % (config.ADV_TEST_DATASET_PATH))
+        sys.exit(0)
+
+    with env.begin(write=False) as txn:
+        n_samples = int(txn.get('num-samples'.encode()))
+
+        # Filtering
+        filtered_index_list = []
+        for index in range(n_samples):
+            index += 1  # lmdb starts with 1
+            label_key = 'label-%09d'.encode() % index
+            label = txn.get(label_key).decode('utf-8')
+
+            if len(label) > max_len:
+                continue
+
+            illegal_sample = False
+            for char_item in label.lower():
+                if char_item not in config.CHARACTER:
+                    illegal_sample = True
+                    break
+            if illegal_sample:
+                continue
+
+            filtered_index_list.append(index)
+
+        img_ret = []
+        text_ret = []
+
+        print(f'num of samples in adversarial IIIT dataset: {len(filtered_index_list)}')
+
+        for index in filtered_index_list:
+            img, label = get_img_from_lmdb(env, index, is_adv=True)
+
+            img_ret.append(img)
+            text_ret.append(label)
+
+            if len(img_ret) == config.TEST_BATCH_SIZE:
+                img_ret = align_collector(img_ret)
+                text_ret, length = converter.encode(text_ret)
+
+                label_indices = []
+                for i, _ in enumerate(length):
+                    for j in range(length[i]):
+                        label_indices.append((i, j))
+                label_indices = np.array(label_indices, np.int64)
+                sequence_length = np.array([26] * config.TEST_BATCH_SIZE, dtype=np.int32)
+                text_ret = text_ret.astype(np.int32)
+
+                yield img_ret, label_indices, text_ret, sequence_length, length
+
+                img_ret = []
+                text_ret = []
diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/lr_schedule.py b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/lr_schedule.py
new file mode 100644
index 0000000..8ac9eac
--- /dev/null
+++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/lr_schedule.py
@@ -0,0 +1,41 @@
+# Copyright 2020-2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""lr generator for cnnctc"""
+import math
+
+def linear_warmup_learning_rate(current_step, warmup_steps, base_lr, init_lr):
+    lr_inc = (float(base_lr) - float(init_lr)) / float(warmup_steps)
+    learning_rate = float(init_lr) + lr_inc * current_step
+    return learning_rate
+
+def a_cosine_learning_rate(current_step, base_lr, warmup_steps, decay_steps):
+    base = float(current_step - warmup_steps) / float(decay_steps)
+    learning_rate = (1 + math.cos(base * math.pi)) / 2 * base_lr
+    return learning_rate
+
+def dynamic_lr(config, steps_per_epoch):
+    """dynamic learning rate generator"""
+    base_lr = config.base_lr
+    total_steps = steps_per_epoch * config.TRAIN_EPOCHS
+    warmup_steps = int(config.warmup_step)
+    decay_steps = total_steps - warmup_steps
+    lr = []
+    for i in range(total_steps):
+        if i < warmup_steps:
+            lr.append(linear_warmup_learning_rate(i, warmup_steps, base_lr, base_lr * config.warmup_ratio))
+        else:
+            lr.append(a_cosine_learning_rate(i, base_lr, warmup_steps, decay_steps))
+
+    return lr
diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/model_utils/__init__.py b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/model_utils/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/model_utils/config.py b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/model_utils/config.py
new file mode 100644
index 0000000..cc3a81b
--- /dev/null
+++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/model_utils/config.py
@@ -0,0 +1,131 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ====================================================================================
+
+"""Parse arguments"""
+import os
+import ast
+import argparse
+from pprint import pprint, pformat
+import yaml
+
+
+_config_path = '../../../default_config.yaml'
+
+
+class Config:
+    """
+    Configuration namespace.
Convert dictionary to members + """ + def __init__(self, cfg_dict): + for k, v in cfg_dict.items(): + if isinstance(v, (list, tuple)): + setattr(self, k, [Config(x) if isinstance(x, dict) else x for x in v]) + else: + setattr(self, k, Config(v) if isinstance(v, dict) else v) + + def __str__(self): + return pformat(self.__dict__) + + def __repr__(self): + return self.__str__() + + +def parse_cli_to_yaml(parser, cfg, helper=None, choices=None, cfg_path='default_config.yaml'): + """ + Parse command line arguments to the configuration according to the default yaml + + Args: + parser: Parent parser + cfg: Base configuration + helper: Helper description + cfg_path: Path to the default yaml config + """ + parser = argparse.ArgumentParser(description='[REPLACE THIS at config.py]', + parents=[parser]) + helper = {} if helper is None else helper + choices = {} if choices is None else choices + for item in cfg: + if not isinstance(cfg[item], list) and not isinstance(cfg[item], dict): + help_description = helper[item] if item in helper else 'Please reference to {}'.format(cfg_path) + choice = choices[item] if item in choices else None + if isinstance(cfg[item], bool): + parser.add_argument('--' + item, type=ast.literal_eval, default=cfg[item], choices=choice, + help=help_description) + else: + parser.add_argument('--' + item, type=type(cfg[item]), default=cfg[item], choices=choice, + help=help_description) + args = parser.parse_args() + return args + + +def parse_yaml(yaml_path): + """ + Parse the yaml config file + + Args: + yaml_path: Path to the yaml config + """ + with open(yaml_path, 'r') as fin: + try: + cfgs = yaml.load_all(fin.read(), Loader=yaml.FullLoader) + cfgs = [x for x in cfgs] + if len(cfgs) == 1: + cfg_helper = {} + cfg = cfgs[0] + cfg_choices = {} + elif len(cfgs) == 2: + cfg, cfg_helper = cfgs + cfg_choices = {} + elif len(cfgs) == 3: + cfg, cfg_helper, cfg_choices = cfgs + else: + raise ValueError('At most 3 docs (config description for help, choices) are supported in config yaml') + print(cfg_helper) + except: + raise ValueError('Failed to parse yaml') + return cfg, cfg_helper, cfg_choices + + +def merge(args, cfg): + """ + Merge the base config from yaml file and command line arguments + + Args: + args: command line arguments + cfg: Base configuration + """ + args_var = vars(args) + for item in args_var: + cfg[item] = args_var[item] + return cfg + + +def get_config(): + """ + Get Config according to the yaml file and cli arguments + """ + parser = argparse.ArgumentParser(description='default name', add_help=False) + current_dir = os.path.dirname(os.path.abspath(__file__)) + parser.add_argument('--config_path', type=str, default=os.path.join(current_dir, _config_path), + help='Config file path') + path_args, _ = parser.parse_known_args() + default, helper, choices = parse_yaml(path_args.config_path) + args = parse_cli_to_yaml(parser=parser, cfg=default, helper=helper, choices=choices, cfg_path=path_args.config_path) + final_config = merge(args, default) + pprint(final_config) + print("Please check the above information for the configurations", flush=True) + return Config(final_config) + +config = get_config() diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/model_utils/device_adapter.py b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/model_utils/device_adapter.py new file mode 100644 index 0000000..ad8415a --- /dev/null +++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/model_utils/device_adapter.py @@ -0,0 +1,26 @@ +# Copyright 2021 Huawei 
Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ====================================================================================
+
+"""Device adapter for ModelArts"""
+
+from .config import config
+if config.enable_modelarts:
+    from .moxing_adapter import get_device_id, get_device_num, get_rank_id, get_job_id
+else:
+    from .local_adapter import get_device_id, get_device_num, get_rank_id, get_job_id
+
+__all__ = [
+    'get_device_id', 'get_device_num', 'get_job_id', 'get_rank_id'
+]
diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/model_utils/local_adapter.py b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/model_utils/local_adapter.py
new file mode 100644
index 0000000..4ff88c4
--- /dev/null
+++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/model_utils/local_adapter.py
@@ -0,0 +1,36 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ====================================================================================
+
+"""Local adapter"""
+
+import os
+
+def get_device_id():
+    device_id = os.getenv('DEVICE_ID', '0')
+    return int(device_id)
+
+
+def get_device_num():
+    device_num = os.getenv('RANK_SIZE', '1')
+    return int(device_num)
+
+
+def get_rank_id():
+    global_rank_id = os.getenv('RANK_ID', '0')
+    return int(global_rank_id)
+
+
+def get_job_id():
+    return 'Local Job'
diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/model_utils/moxing_adapter.py b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/model_utils/moxing_adapter.py
new file mode 100644
index 0000000..c2d2282
--- /dev/null
+++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/model_utils/moxing_adapter.py
@@ -0,0 +1,124 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ====================================================================================
+
+"""Moxing adapter for ModelArts"""
+
+import os
+import functools
+from mindspore import context
+from .config import config
+
+
+_global_syn_count = 0
+
+
+def get_device_id():
+    device_id = os.getenv('DEVICE_ID', '0')
+    return int(device_id)
+
+
+def get_device_num():
+    device_num = os.getenv('RANK_SIZE', '1')
+    return int(device_num)
+
+
+def get_rank_id():
+    global_rank_id = os.getenv('RANK_ID', '0')
+    return int(global_rank_id)
+
+
+def get_job_id():
+    job_id = os.getenv('JOB_ID')
+    job_id = job_id if job_id != "" else "default"
+    return job_id
+
+
+def sync_data(from_path, to_path):
+    """
+    Download data from remote obs to the local directory if the first url is remote and the second one is local.
+    Upload data from the local directory to remote obs in the opposite case.
+    """
+    import moxing as mox
+    import time
+    global _global_syn_count
+    sync_lock = '/tmp/copy_sync.lock' + str(_global_syn_count)
+    _global_syn_count += 1
+
+    # Each server contains at most 8 devices
+    if get_device_id() % min(get_device_num(), 8) == 0 and not os.path.exists(sync_lock):
+        print('from path: ', from_path)
+        print('to path: ', to_path)
+        mox.file.copy_parallel(from_path, to_path)
+        print('===finished data synchronization===')
+        try:
+            os.mknod(sync_lock)
+        except IOError:
+            pass
+        print('===save flag===')
+
+    while True:
+        if os.path.exists(sync_lock):
+            break
+        time.sleep(1)
+    print('Finish sync data from {} to {}'.format(from_path, to_path))
+
+
+def moxing_wrapper(pre_process=None, post_process=None):
+    """
+    Moxing wrapper to download dataset and upload outputs
+    """
+    def wrapper(run_func):
+        @functools.wraps(run_func)
+        def wrapped_func(*args, **kwargs):
+            # Download data from data_url
+            if config.enable_modelarts:
+                if config.data_url:
+                    sync_data(config.data_url, config.data_path)
+                    print('Dataset downloaded: ', os.listdir(config.data_path))
+                if config.checkpoint_url:
+                    if not os.path.exists(config.load_path):
+                        # os.makedirs(config.load_path)
+                        print('=' * 20 + 'makedirs')
+                        if os.path.isdir(config.load_path):
+                            print('=' * 20 + 'makedirs success')
+                        else:
+                            print('=' * 20 + 'makedirs fail')
+                    sync_data(config.checkpoint_url, config.load_path)
+                    print('Preload downloaded: ', os.listdir(config.load_path))
+                if config.train_url:
+                    sync_data(config.train_url, config.output_path)
+                    print('Workspace downloaded: ', os.listdir(config.output_path))
+
+                context.set_context(save_graphs_path=os.path.join(config.output_path, str(get_rank_id())))
+                config.device_num = get_device_num()
+                config.device_id = get_device_id()
+                if not os.path.exists(config.output_path):
+                    os.makedirs(config.output_path)
+
+            if pre_process:
+                pre_process()
+
+            run_func(*args, **kwargs)
+
+            # Upload data to train_url
+            if config.enable_modelarts:
+                if post_process:
+                    post_process()
+
+                if config.train_url:
+                    print('Start to copy output directory')
+                    sync_data(config.output_path, config.train_url)
+        return wrapped_func
+    return wrapper
diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/preprocess_dataset.py b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/preprocess_dataset.py
new file mode 100644
index 0000000..c1a190b
--- /dev/null
+++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/preprocess_dataset.py
@@ -0,0 +1,172 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""preprocess dataset"""
+
+import random
+import pickle
+import numpy as np
+import lmdb
+from tqdm import tqdm
+
+def combine_lmdbs(lmdb_paths, lmdb_save_path):
+    """combine lmdb dataset"""
+    max_len = int((26 + 1) // 2)
+    character = '0123456789abcdefghijklmnopqrstuvwxyz'
+
+    env_save = lmdb.open(
+        lmdb_save_path,
+        map_size=1099511627776)
+
+    cnt = 0
+    for lmdb_path in lmdb_paths:
+        env = lmdb.open(lmdb_path, max_readers=32, readonly=True, lock=False, readahead=False, meminit=False)
+        with env.begin(write=False) as txn:
+            n_samples = int(txn.get('num-samples'.encode()))
+
+            # Filtering
+            for index in tqdm(range(n_samples)):
+                index += 1  # lmdb starts with 1
+                label_key = 'label-%09d'.encode() % index
+                label = txn.get(label_key).decode('utf-8')
+
+                if len(label) > max_len:
+                    continue
+
+                illegal_sample = False
+                for char_item in label.lower():
+                    if char_item not in character:
+                        illegal_sample = True
+                        break
+                if illegal_sample:
+                    continue
+
+                img_key = 'image-%09d'.encode() % index
+                imgbuf = txn.get(img_key)
+
+                with env_save.begin(write=True) as txn_save:
+                    cnt += 1
+
+                    label_key_save = 'label-%09d'.encode() % cnt
+                    label_save = label.encode()
+                    image_key_save = 'image-%09d'.encode() % cnt
+                    image_save = imgbuf
+
+                    txn_save.put(label_key_save, label_save)
+                    txn_save.put(image_key_save, image_save)
+
+    n_samples = cnt
+    with env_save.begin(write=True) as txn_save:
+        txn_save.put('num-samples'.encode(), str(n_samples).encode())
+
+
+def analyze_lmdb_label_length(lmdb_path, batch_size=192, num_of_combinations=1000):
+    """analyze lmdb label"""
+    label_length_dict = {}
+
+    env = lmdb.open(lmdb_path, max_readers=32, readonly=True, lock=False, readahead=False, meminit=False)
+    with env.begin(write=False) as txn:
+        n_samples = int(txn.get('num-samples'.encode()))
+
+        for index in tqdm(range(n_samples)):
+            index += 1  # lmdb starts with 1
+            label_key = 'label-%09d'.encode() % index
+            label = txn.get(label_key).decode('utf-8')
+
+            label_length = len(label)
+            if label_length in label_length_dict:
+                label_length_dict[label_length] += 1
+            else:
+                label_length_dict[label_length] = 1
+
+    sorted_label_length = sorted(label_length_dict.items(), key=lambda x: x[1], reverse=True)
+
+    label_length_sum = 0
+    label_num = 0
+    lengths = []
+    p = []
+    for l, num in sorted_label_length:
+        label_length_sum += l * num
+        label_num += num
+        p.append(num)
+        lengths.append(l)
+    for i, _ in enumerate(p):
+        p[i] /= label_num
+
+    average_overall_length = int(label_length_sum / label_num * batch_size)
+
+    def get_combinations_of_fix_length(fix_length, items, p, batch_size):
+        ret = np.random.choice(items, batch_size - 1, True, p)
+        cur_sum = sum(ret)
+        ret = list(ret)
+        if fix_length - cur_sum in items:
+            ret.append(fix_length - cur_sum)
+        else:
+            return None
+        return ret
+
+    result = []
+    while len(result) < num_of_combinations:
+        ret = get_combinations_of_fix_length(average_overall_length, lengths, p, batch_size)
+        if ret is not None:
+            result.append(ret)
+    return result


+def
generate_fix_shape_index_list(lmdb_path, combinations, pkl_save_path, num_of_iters=70000): + """generate fix shape index list""" + length_index_dict = {} + + env = lmdb.open(lmdb_path, max_readers=32, readonly=True, lock=False, readahead=False, meminit=False) + with env.begin(write=False) as txn: + n_samples = int(txn.get('num-samples'.encode())) + n_samples = n_samples + + for index in tqdm(range(n_samples)): + index += 1 # lmdb starts with 1 + label_key = 'label-%09d'.encode() % index + label = txn.get(label_key).decode('utf-8') + + label_length = len(label) + if label_length in length_index_dict: + length_index_dict[label_length].append(index) + else: + length_index_dict[label_length] = [index] + + ret = [] + for _ in range(num_of_iters): + comb = random.choice(combinations) + for l in comb: + ret.append(random.choice(length_index_dict[l])) + + with open(pkl_save_path, 'wb') as f: + pickle.dump(ret, f, -1) + + +if __name__ == '__main__': + # step 1: combine the SynthText dataset and MJSynth dataset into a single lmdb file + print('Begin to combine multiple lmdb datasets') + combine_lmdbs(['/home/workspace/mindspore_dataset/CNNCTC_Data/1_ST/', + '/home/workspace/mindspore_dataset/CNNCTC_Data/MJ_train/'], + '/home/workspace/mindspore_dataset/CNNCTC_Data/ST_MJ') + + # step 2: generate the order of input data, guarantee that the input batch shape is fixed + print('Begin to generate the index order of input data') + combination = analyze_lmdb_label_length('/home/workspace/mindspore_dataset/CNNCTC_Data/ST_MJ') + generate_fix_shape_index_list('/home/workspace/mindspore_dataset/CNNCTC_Data/ST_MJ', combination, + '/home/workspace/mindspore_dataset/CNNCTC_Data/st_mj_fixed_length_index_list.pkl') + + print('Done') diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/util.py b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/util.py new file mode 100644 index 0000000..ac19fe6 --- /dev/null +++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/util.py @@ -0,0 +1,102 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""util file""" + +import numpy as np + +class AverageMeter(): + """Computes and stores the average and current value""" + + def __init__(self): + self.reset() + + def reset(self): + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count + + +class CTCLabelConverter(): + """ Convert between text-label and text-index """ + + def __init__(self, character): + # character (str): set of the possible characters. 
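+        # e.g. the 36-character set '0123456789abcdefghijklmnopqrstuvwxyz'
+        # used elsewhere in this repo (see src/preprocess_dataset.py).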
+        dict_character = list(character)
+
+        self.dict = {}
+        for i, char in enumerate(dict_character):
+            self.dict[char] = i
+
+        self.character = dict_character + ['[blank]']  # dummy '[blank]' token for CTCLoss (last index)
+        self.dict['[blank]'] = len(dict_character)
+
+    def encode(self, text):
+        """Convert text-label into text-index.
+        input:
+            text: text labels of each image. [batch_size]
+
+        output:
+            text: concatenated text index for CTCLoss.
+                [sum(text_lengths)] = [text_index_0 + text_index_1 + ... + text_index_(n - 1)]
+            length: length of each text. [batch_size]
+        """
+        length = [len(s) for s in text]
+        text = ''.join(text)
+        text = [self.dict[char] for char in text]
+
+        return np.array(text), np.array(length)
+
+    def decode(self, text_index, length):
+        """Convert text-index into text-label, removing repeated characters and blanks."""
+        texts = []
+        index = 0
+        for l in length:
+            t = text_index[index:index + l]
+
+            char_list = []
+            for i in range(l):
+                # Drop blanks and collapse repeated characters.
+                if t[i] != self.dict['[blank]'] and (not (i > 0 and t[i - 1] == t[i])):
+                    char_list.append(self.character[t[i]])
+            text = ''.join(char_list)
+
+            texts.append(text)
+            index += l
+        return texts
+
+    def reverse_encode(self, text_index, length):
+        """Convert text-index into text-label, removing blanks only."""
+        texts = []
+        index = 0
+        for l in length:
+            t = text_index[index:index + l]
+
+            char_list = []
+            for i in range(l):
+                if t[i] != self.dict['[blank]']:  # removing blanks
+                    char_list.append(self.character[t[i]])
+            text = ''.join(char_list)
+
+            texts.append(text)
+            index += l
+        return texts
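+
+
+# Editor's illustration (not part of the original patch): a quick round trip
+# through CTCLabelConverter on the alphabet used by this model, runnable as
+# `python src/util.py`. The strings below are made-up examples.
+if __name__ == '__main__':
+    _converter = CTCLabelConverter('0123456789abcdefghijklmnopqrstuvwxyz')
+    _indices, _lengths = _converter.encode(['ocr', 'demo'])
+    # encode() concatenates the per-image indices: 3 + 4 = 7 entries in total.
+    assert _indices.shape == (7,) and list(_lengths) == [3, 4]
+    # decode() drops blanks and collapses repeats; for labels without adjacent
+    # repeated characters it inverts encode().
+    print(_converter.decode(_indices, _lengths))  # -> ['ocr', 'demo']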
diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/train.py b/examples/natural_robustness/ocr_evaluate/cnn_ctc/train.py
new file mode 100644
index 0000000..87e6eeb
--- /dev/null
+++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/train.py
@@ -0,0 +1,148 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""cnnctc train"""
+
+
+import numpy as np
+import mindspore
+import mindspore.common.dtype as mstype
+from mindspore import context
+from mindspore import Tensor
+from mindspore.common import set_seed
+from mindspore.communication.management import init, get_rank, get_group_size
+from mindspore.dataset import GeneratorDataset
+from mindspore.train.callback import ModelCheckpoint, CheckpointConfig
+from mindspore.train.model import Model
+from mindspore.train.serialization import load_checkpoint, load_param_into_net
+from src.callback import LossCallBack
+from src.cnn_ctc import CNNCTC, CTCLoss, WithLossCell, CNNCTCTrainOneStepWithLossScaleCell
+from src.dataset import STMJGeneratorBatchFixedLength, STMJGeneratorBatchFixedLengthPara
+from src.lr_schedule import dynamic_lr
+from src.model_utils.config import config
+from src.model_utils.device_adapter import get_device_id
+from src.model_utils.moxing_adapter import moxing_wrapper
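+
+
+# Editor's illustration (not part of the original patch): a minimal sketch of a
+# linear-warmup schedule of the kind implied by the config keys base_lr,
+# warmup_step and warmup_ratio. The schedule actually used for training is
+# dynamic_lr() from src/lr_schedule.py, imported above; this sketch only
+# illustrates the shape such a schedule typically has.
+def _sketch_warmup_lr(base_lr, warmup_step, warmup_ratio, total_steps):
+    """Ramp linearly from base_lr * warmup_ratio up to base_lr, then stay flat."""
+    lrs = []
+    for step in range(total_steps):
+        if step < warmup_step:
+            lrs.append(base_lr * (warmup_ratio + (1 - warmup_ratio) * step / warmup_step))
+        else:
+            lrs.append(base_lr)
+    return lrs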
+
+
+set_seed(1)
+
+context.set_context(mode=context.GRAPH_MODE, save_graphs=False, save_graphs_path=".")
+
+
+def dataset_creator(run_distribute):
+    """dataset creator"""
+    if run_distribute:
+        st_dataset = STMJGeneratorBatchFixedLengthPara()
+    else:
+        st_dataset = STMJGeneratorBatchFixedLength()
+
+    ds = GeneratorDataset(st_dataset,
+                          ['img', 'label_indices', 'text', 'sequence_length'],
+                          num_parallel_workers=8)
+
+    return ds
+
+
+def modelarts_pre_process():
+    pass
+
+
+@moxing_wrapper(pre_process=modelarts_pre_process)
+def train():
+    """train cnnctc model"""
+    target = config.device_target
+    context.set_context(device_target=target)
+
+    if target == "Ascend":
+        device_id = get_device_id()
+        context.set_context(device_id=device_id)
+
+        if config.run_distribute:
+            init()
+            context.set_auto_parallel_context(parallel_mode="data_parallel")
+
+        ckpt_save_dir = config.SAVE_PATH
+    else:
+        # GPU target
+        device_id = get_device_id()
+        context.set_context(device_id=device_id)
+        if config.run_distribute:
+            init()
+            context.set_auto_parallel_context(device_num=get_group_size(),
+                                              parallel_mode="data_parallel",
+                                              gradients_mean=False,
+                                              gradient_fp32_sync=False)
+
+            ckpt_save_dir = config.SAVE_PATH + "ckpt_" + str(get_rank()) + "/"
+        else:
+            ckpt_save_dir = config.SAVE_PATH + "ckpt_standalone/"
+
+    ds = dataset_creator(config.run_distribute)
+
+    net = CNNCTC(config.NUM_CLASS, config.HIDDEN_SIZE, config.FINAL_FEATURE_WIDTH)
+    net.set_train(True)
+
+    if config.PRED_TRAINED:
+        param_dict = load_checkpoint(config.PRED_TRAINED)
+        load_param_into_net(net, param_dict)
+        print('parameters loaded!')
+    else:
+        print('train from scratch...')
+
+    criterion = CTCLoss()
+    dataset_size = ds.get_dataset_size()
+    lr = Tensor(dynamic_lr(config, dataset_size), mstype.float32)
+    opt = mindspore.nn.RMSProp(params=net.trainable_params(),
+                               centered=True,
+                               learning_rate=lr,
+                               momentum=config.MOMENTUM,
+                               loss_scale=config.LOSS_SCALE)
+
+    net = WithLossCell(net, criterion)
+
+    if target == "Ascend":
+        loss_scale_manager = mindspore.train.loss_scale_manager.FixedLossScaleManager(
+            config.LOSS_SCALE, False)
+        net.set_train(True)
+        model = Model(net, optimizer=opt, loss_scale_manager=loss_scale_manager, amp_level="O2")
+    else:
+        scaling_sens = Tensor(np.full((1), config.LOSS_SCALE), dtype=mstype.float32)
+        net = CNNCTCTrainOneStepWithLossScaleCell(net, opt, scaling_sens)
+        net.set_train(True)
+        model = Model(net)
+
+    callback = LossCallBack()
+    config_ck = CheckpointConfig(save_checkpoint_steps=config.SAVE_CKPT_PER_N_STEP,
+                                 keep_checkpoint_max=config.KEEP_CKPT_MAX_NUM)
+    ckpoint_cb = ModelCheckpoint(prefix="CNNCTC", config=config_ck, directory=ckpt_save_dir)
+
+    if config.run_distribute:
+        if device_id == 0:
+            model.train(config.TRAIN_EPOCHS,
+                        ds,
+                        callbacks=[callback, ckpoint_cb],
+                        dataset_sink_mode=False)
+        else:
+            model.train(config.TRAIN_EPOCHS, ds, callbacks=[callback], dataset_sink_mode=False)
+    else:
+        model.train(config.TRAIN_EPOCHS,
+                    ds,
+                    callbacks=[callback, ckpoint_cb],
+                    dataset_sink_mode=False)
+
+
+if __name__ == '__main__':
+    train()
diff --git a/examples/natural_robustness/ocr_evaluate/default_config.yaml b/examples/natural_robustness/ocr_evaluate/default_config.yaml
new file mode 100644
index 0000000..63f94db
--- /dev/null
+++ b/examples/natural_robustness/ocr_evaluate/default_config.yaml
@@ -0,0 +1,76 @@
+# Builtin configurations (DO NOT CHANGE THESE CONFIGURATIONS unless you know exactly what you are doing)
+enable_modelarts: False
+# url for modelarts
+data_url: ""
+train_url: ""
+checkpoint_url: ""
+# path for local
+data_path: "/cache/data"
+output_path: "/cache/train"
+load_path: "/cache/checkpoint_path"
+device_target: "GPU"
+enable_profiling: False
+
+# ======================================================================================
+# Training options
+CHARACTER: "0123456789abcdefghijklmnopqrstuvwxyz"
+
+# NUM_CLASS = len(CHARACTER) + 1
+NUM_CLASS: 37
+
+HIDDEN_SIZE: 512
+FINAL_FEATURE_WIDTH: 26
+
+# dataset config
+IMG_H: 32
+IMG_W: 100
+TRAIN_DATASET_PATH: "/opt/dataset/CNNCTC_data/MJ-ST-IIIT/ST_MJ/"
+TRAIN_DATASET_INDEX_PATH: "/opt/dataset/CNNCTC_data/MJ-ST-IIIT/st_mj_fixed_length_index_list.pkl"
+TRAIN_BATCH_SIZE: 192
+TRAIN_EPOCHS: 3
+
+# training config
+run_distribute: False
+PRED_TRAINED: ""
+SAVE_PATH: "./"
+# LR
+base_lr: 0.0005
+warmup_step: 2000
+warmup_ratio: 0.0625
+MOMENTUM: 0.8
+LOSS_SCALE: 8096
+SAVE_CKPT_PER_N_STEP: 2000
+KEEP_CKPT_MAX_NUM: 5
+
+# ======================================================================================
+# Eval options
+TEST_DATASET_PATH: "/opt/dataset/CNNCTC_data/MJ-ST-IIIT/IIIT5k_3000"
+#TEST_DATASET_PATH: "/home/mindarmour/examples/natural_robustness/ocr_evaluate/data"
+TEST_BATCH_SIZE: 256
+CHECKPOINT_PATH: "/home/mindarmour/examples/natural_robustness/ocr_evaluate/cnn_ctc/ckpt_standalone/CNNCTC-3_70000.ckpt"
+ADV_TEST_DATASET_PATH: "/home/mindarmour/examples/natural_robustness/ocr_evaluate/data"
+IS_ADV: False
+
+# export options
+device_id: 0
+file_name: "cnnctc"
+file_format: "MINDIR"
+ckpt_file: ""
+
+# 310 infer
+result_path: ""
+label_path: ""
+preprocess_output: ""
+
+---
+# Help description for each configuration
+enable_modelarts: "Whether training on modelarts. Default: False"
+data_url: "Url for modelarts"
+train_url: "Url for modelarts"
+data_path: "The location of input data"
+output_path: "The location of the output file"
+device_target: "Target device type: GPU or Ascend. (Default: GPU)"
+enable_profiling: "Whether to enable profiling while training. Default: False"
+file_name: "CNN&CTC output air name"
+file_format: "choices [AIR, MINDIR]"
+ckpt_file: "CNN&CTC ckpt file"
(Default: None)" +enable_profiling: "Whether enable profiling while training default: False" +file_name: "CNN&CTC output air name" +file_format: "choices [AIR, MINDIR]" +ckpt_file: "CNN&CTC ckpt file" diff --git a/examples/natural_robustness/ocr_evaluate/eval_and_save.py b/examples/natural_robustness/ocr_evaluate/eval_and_save.py new file mode 100644 index 0000000..a015a46 --- /dev/null +++ b/examples/natural_robustness/ocr_evaluate/eval_and_save.py @@ -0,0 +1,100 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""cnnctc eval""" + +import numpy as np +import lmdb +from mindspore import Tensor, context +import mindspore.common.dtype as mstype +from mindspore.train.serialization import load_checkpoint, load_param_into_net +from mindspore.dataset import GeneratorDataset +from cnn_ctc.src.util import CTCLabelConverter +from cnn_ctc.src.dataset import iiit_generator_batch, adv_iiit_generator_batch +from cnn_ctc.src.cnn_ctc import CNNCTC +from cnn_ctc.src.model_utils.config import config + +context.set_context(mode=context.GRAPH_MODE, save_graphs=False, + save_graphs_path=".") + + +def test_dataset_creator(is_adv=False): + if is_adv: + ds = GeneratorDataset(adv_iiit_generator_batch(), ['img', 'label_indices', 'text', + 'sequence_length', 'label_str']) + else: + ds = GeneratorDataset(iiit_generator_batch, ['img', 'label_indices', 'text', + 'sequence_length', 'label_str']) + return ds + + +def test(lmdb_save_path): + """eval cnnctc model on begin and perturb data.""" + target = config.device_target + context.set_context(device_target=target) + + ds = test_dataset_creator(is_adv=config.IS_ADV) + net = CNNCTC(config.NUM_CLASS, config.HIDDEN_SIZE, config.FINAL_FEATURE_WIDTH) + + ckpt_path = config.CHECKPOINT_PATH + param_dict = load_checkpoint(ckpt_path) + load_param_into_net(net, param_dict) + print('parameters loaded! 
+
+
+def test(lmdb_save_path):
+    """Eval the cnnctc model on benign and perturbed data, saving predictions."""
+    target = config.device_target
+    context.set_context(device_target=target)
+
+    ds = test_dataset_creator(is_adv=config.IS_ADV)
+    net = CNNCTC(config.NUM_CLASS, config.HIDDEN_SIZE, config.FINAL_FEATURE_WIDTH)
+
+    ckpt_path = config.CHECKPOINT_PATH
+    param_dict = load_checkpoint(ckpt_path)
+    load_param_into_net(net, param_dict)
+    print('parameters loaded! from: ', ckpt_path)
+
+    converter = CTCLabelConverter(config.CHARACTER)
+
+    count = 0
+    correct_count = 0
+    env_save = lmdb.open(lmdb_save_path, map_size=1099511627776)
+    with env_save.begin(write=True) as txn_save:
+        for data in ds.create_tuple_iterator():
+            img, _, text, _, length = data
+
+            img_tensor = Tensor(img, mstype.float32)
+
+            model_predict = net(img_tensor)
+            model_predict = np.squeeze(model_predict.asnumpy())
+
+            preds_size = np.array([model_predict.shape[1]] * config.TEST_BATCH_SIZE)
+            preds_index = np.argmax(model_predict, 2)
+            preds_index = np.reshape(preds_index, [-1])
+            preds_str = converter.decode(preds_index, preds_size)
+            label_str = converter.reverse_encode(text.asnumpy(), length.asnumpy())
+
+            print("Prediction samples: \n", preds_str[:5])
+            print("Ground truth: \n", label_str[:5])
+            for pred, label in zip(preds_str, label_str):
+                if pred == label:
+                    correct_count += 1
+                count += 1
+                if config.IS_ADV:
+                    pred_key = 'adv_pred-%09d'.encode() % count
+                else:
+                    pred_key = 'pred-%09d'.encode() % count
+
+                txn_save.put(pred_key, pred.encode())
+    accuracy = correct_count / count
+    return accuracy
+
+
+if __name__ == '__main__':
+    save_path = config.ADV_TEST_DATASET_PATH
+    config.IS_ADV = False
+    config.TEST_DATASET_PATH = save_path
+    ori_acc = test(lmdb_save_path=save_path)
+
+    config.IS_ADV = True
+    adv_acc = test(lmdb_save_path=save_path)
+    print('Accuracy of benign samples: ', ori_acc)
+    print('Accuracy of perturbed samples: ', adv_acc)
diff --git a/examples/natural_robustness/ocr_evaluate/generate_adv_samples.py b/examples/natural_robustness/ocr_evaluate/generate_adv_samples.py
new file mode 100644
index 0000000..62b87df
--- /dev/null
+++ b/examples/natural_robustness/ocr_evaluate/generate_adv_samples.py
@@ -0,0 +1,139 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""Generate natural robustness samples."""
""" + +import sys +import json +import time +import lmdb +from mindspore_serving.client import Client +from cnn_ctc.src.model_utils.config import config + +config_perturb = [ + {"method": "Contrast", "params": {"alpha": 1.5, "beta": 0}}, + {"method": "GaussianBlur", "params": {"ksize": 5}}, + {"method": "SaltAndPepperNoise", "params": {"factor": 0.05}}, + {"method": "Translate", "params": {"x_bias": 0.1, "y_bias": -0.1}}, + {"method": "Scale", "params": {"factor_x": 0.8, "factor_y": 0.8}}, + {"method": "Shear", "params": {"factor": 1.5, "direction": "horizontal"}}, + {"method": "Rotate", "params": {"angle": 30}}, + {"method": "MotionBlur", "params": {"degree": 5, "angle": 45}}, + {"method": "GradientBlur", "params": {"point": [50, 100], "kernel_num": 3, "center": True}}, + {"method": "GradientLuminance", "params": {"color_start": [255, 255, 255], "color_end": [0, 0, 0], + "start_point": [100, 150], "scope": 0.3, + "bright_rate": 0.3, "pattern": "light", "mode": "circle"}}, + {"method": "GradientLuminance", "params": {"color_start": [255, 255, 255], + "color_end": [0, 0, 0], "start_point": [150, 200], + "scope": 0.3, "pattern": "light", "mode": "horizontal"}}, + {"method": "GradientLuminance", "params": {"color_start": [255, 255, 255], "color_end": [0, 0, 0], + "start_point": [150, 200], "scope": 0.3, + "pattern": "light", "mode": "vertical"}}, + {"method": "Curve", "params": {"curves": 0.5, "depth": 3, "mode": "vertical"}}, + {"method": "Perspective", "params": {"ori_pos": [[0, 0], [0, 800], [800, 0], [800, 800]], + "dst_pos": [[10, 0], [0, 800], [790, 0], [800, 800]]}}, +] + + +def generate_adv_iii5t_3000(lmdb_paths, lmdb_save_path, perturb_config): + """generate perturb iii5t_3000""" + max_len = int((26 + 1) // 2) + + instances = [] + methods_number = 1 + outputs_number = 2 + perturb_config = json.dumps(perturb_config) + + env = lmdb.open(lmdb_paths, max_readers=32, readonly=True, lock=False, readahead=False, meminit=False) + + if not env: + print('cannot create lmdb from %s' % (lmdb_paths)) + sys.exit(0) + with env.begin(write=False) as txn: + n_samples = int(txn.get('num-samples'.encode())) + + # Filtering + filtered_labels = [] + filtered_index_list = [] + for index in range(n_samples): + index += 1 # lmdb starts with 1 + label_key = 'label-%09d'.encode() % index + label = txn.get(label_key).decode('utf-8') + + if len(label) > max_len: continue + illegal_sample = False + for char_item in label.lower(): + if char_item not in config.CHARACTER: + illegal_sample = True + break + if illegal_sample: continue + + filtered_labels.append(label) + filtered_index_list.append(index) + img_key = 'image-%09d'.encode() % index + imgbuf = txn.get(img_key) + instances.append({"img": imgbuf, 'perturb_config': perturb_config, "methods_number": methods_number, + "outputs_number": outputs_number}) + + print(f'num of samples in IIIT dataset: {len(filtered_index_list)}') + + client = Client("10.113.216.54:5500", "perturbation", "natural_perturbation") + start_time = time.time() + result = client.infer(instances) + end_time = time.time() + print('generated natural perturbs images cost: ', end_time - start_time) + env_save = lmdb.open(lmdb_save_path, map_size=1099511627776) + + txn = env.begin(write=False) + with env_save.begin(write=True) as txn_save: + new_index = 1 + for i, index in enumerate(filtered_index_list): + try: + file_names = result[i]['file_names'].split(';') + except: + error_msg = result[i] + raise ValueError(error_msg) + + length = result[i]['file_length'].tolist() + before = 0 + label = 
+
+
+def generate_adv_iii5t_3000(lmdb_paths, lmdb_save_path, perturb_config):
+    """Generate perturbed samples for the IIIT5K_3000 dataset."""
+    max_len = int((26 + 1) // 2)
+
+    instances = []
+    methods_number = 1
+    outputs_number = 2
+    perturb_config = json.dumps(perturb_config)
+
+    env = lmdb.open(lmdb_paths, max_readers=32, readonly=True, lock=False, readahead=False, meminit=False)
+
+    if not env:
+        print('cannot open lmdb from %s' % lmdb_paths)
+        sys.exit(1)
+    with env.begin(write=False) as txn:
+        n_samples = int(txn.get('num-samples'.encode()))
+
+        # Filter out labels that are too long or contain illegal characters.
+        filtered_labels = []
+        filtered_index_list = []
+        for index in range(n_samples):
+            index += 1  # lmdb starts with 1
+            label_key = 'label-%09d'.encode() % index
+            label = txn.get(label_key).decode('utf-8')
+
+            if len(label) > max_len:
+                continue
+            illegal_sample = False
+            for char_item in label.lower():
+                if char_item not in config.CHARACTER:
+                    illegal_sample = True
+                    break
+            if illegal_sample:
+                continue
+
+            filtered_labels.append(label)
+            filtered_index_list.append(index)
+            img_key = 'image-%09d'.encode() % index
+            imgbuf = txn.get(img_key)
+            instances.append({"img": imgbuf, 'perturb_config': perturb_config, "methods_number": methods_number,
+                              "outputs_number": outputs_number})
+
+    print(f'num of samples in IIIT dataset: {len(filtered_index_list)}')
+
+    client = Client("10.113.216.54:5500", "perturbation", "natural_perturbation")
+    start_time = time.time()
+    result = client.infer(instances)
+    end_time = time.time()
+    print('generating natural perturbation images cost: ', end_time - start_time)
+    env_save = lmdb.open(lmdb_save_path, map_size=1099511627776)
+
+    txn = env.begin(write=False)
+    with env_save.begin(write=True) as txn_save:
+        new_index = 1
+        for i, index in enumerate(filtered_index_list):
+            try:
+                file_names = result[i]['file_names'].split(';')
+            except (KeyError, TypeError):
+                # The serving backend returned an error message instead of results.
+                raise ValueError(result[i])
+
+            length = result[i]['file_length'].tolist()
+            before = 0
+            label = filtered_labels[i]
+            label = label.encode()
+            img_key = 'image-%09d'.encode() % index
+            ori_img = txn.get(img_key)
+
+            names_dict = result[i]['names_dict']
+            names_dict = json.loads(names_dict)
+            for name, leng in zip(file_names, length):
+                label_key = 'label-%09d'.encode() % new_index
+                txn_save.put(label_key, label)
+                img_key = 'image-%09d'.encode() % new_index
+
+                adv_img = result[i]['results']
+                adv_img = adv_img[before:before + leng]
+                adv_img_key = 'adv_image-%09d'.encode() % new_index
+                txn_save.put(img_key, ori_img)
+                txn_save.put(adv_img_key, adv_img)
+
+                adv_info_key = 'adv_info-%09d'.encode() % new_index
+                adv_info = json.dumps(names_dict[name]).encode()
+                txn_save.put(adv_info_key, adv_info)
+                before = before + leng
+                new_index += 1
+        txn_save.put("num-samples".encode(), str(new_index - 1).encode())
+    env.close()
+
+
+if __name__ == '__main__':
+    save_path_lmdb = config.ADV_TEST_DATASET_PATH
+    generate_adv_iii5t_3000(config.TEST_DATASET_PATH, save_path_lmdb, config_perturb)
diff --git a/examples/natural_robustness/ocr_evaluate/image/catalog.png b/examples/natural_robustness/ocr_evaluate/image/catalog.png
new file mode 100644
index 0000000000000000000000000000000000000000..af6fe3594c163834ff34d8f60b90f2df3b77fc23
GIT binary patch
literal 14419
 [base85-encoded binary image data omitted]
diff --git a/examples/natural_robustness/ocr_evaluate/image/name_format.png b/examples/natural_robustness/ocr_evaluate/image/name_format.png
new file mode 100644
GIT binary patch
 [base85-encoded binary image data omitted]
diff --git a/examples/natural_robustness/ocr_evaluate/image/result_demo.png b/examples/natural_robustness/ocr_evaluate/image/result_demo.png
new file mode 100644
index 0000000000000000000000000000000000000000..64f28286d66fae64f57a34190cbfae29d1382f0d
GIT binary patch
literal 45458
 [base85-encoded binary image data omitted]
zh}Ox(oKZ52B1|4oYcLunDwN>tXhab2c;un2)aw^xZ`*t{RbVi%YbOJ{cE0@+XpLezAG>ibYo;bS-BM(pF?1To z^Fw1iRQC9|e`R@`Q>8X;$b?RDJa=N02g^ZbBrBHq#QZL9EgAARUC)o-n|}@xiNxDr z=)@4;`St^R`hWgIAj%74qI0rAeCP@EF2_g~OA3k_rD0E$S+GVpaNsp|?%a)Y1$Fiwq+8+)*zY ztO>6QO^6=n9M3)bEQ5yzXZ7CBCQTI>Tz&&3!_wA_T6LPCv$6rDt{%QHzl9cKXv6dB zsd4tcWws#Wjz!(9ua5D53|1I11%7Q&A2&*!?ZD{7G`r_@aBFFj%@qJ)aaSwzr^`J5 z3qipdmbY6TJ~Yg32kmWL+_|WSv4g`r9T)Z$?HOAKPw`C$-CaF=enBUDUK?Y80v}z} z!b&L%;Gb;s~Q)(Xcpsi9An2TFEKeWg|V$bF*F4Fpm$Kst1Mf&iq)%^qb>&*{0&sYEP*I^ zRcvNaa-7dYX+ur$dLk#wiul||>Ej?|BdDvz?8b)-$)a z7hkDisH4UpKBNjHiYC=un0^p6$QlzSAw)yZfiX5tBnU0VBK5ousyNfIleYY9`*teR zmD#wBR}D?M8ra|c+&Q{rKl4jqNYzz$TrHvU|osyKuUFqx&pJ4Vk6hBbGLHRo@IL?Uqon4X^I{%?JY zKmP20ptq+tEc-_B&IjKEHF)n3g<@8~t4B~ur6Q+Kp5(F=>kQ7{W|Xyq@k0e#%55k$7v496rKB5B`AP`=34)^$Q4-2f|ERJ}4sV zjNP)RvVZ@6UVZf-&;n9GCBvh5b=0b5y1F}AyLKHton2wPP;_RR0ZcS#QamnX3%o%s zL1YON6%M1VvI)}>ipO~$N(y3dF4#do_v~|wjg6h-)4jK7N*x1z`Yt-EkI^>zAUyQ| zvi?ICcc2*7_V@88-JUH+#(2IC1rag{;u%PR5B0UOqQ%l-gsx%>zdpZ%#d(#zZ_y9n zt5sg87P(_?i9P|b1#X|)O3xeC@0{nxa+ddX^Xu)7=f>-tm0+O1vx86c6lghnTXzSy zl>~!fVNWNw6>2;`>9|l;%-5>CP%CoB+?JW^`S9G*o31C3NF*+u=eKX?=^sBCyZmM5 zsouFf^dG3hyBy*u;px*S*|l>A<*7-;1fwK11&|`nDY-gUEnC8h8*ij&47l)Q@1ypC z;y{F4d@yAK@II6*P>}F9qF0Yo4^jL`j0w6+D8^VUhS8A`9)9SdS-W>v6HS>hn1AaS zrU)Rkmj@Z&bq|%Xz6%~Fro=~j3$#mtPc1F+skmMJG+!GSWe3pK+Rh*Jb<<~q6WFn- zDZYMqlHG5$B5R$;j*ZeWzl+bW=qAtWJU24M>#c=1yliY@iaqms*f2fD_BRC?b2{3& zJ@Y(1TBjPf?H#MKuzxOJDj95E=gFg^JasV>1zP9PqoZ^#=;AL{8tU~r+ea%LYRz~L z_u3>9iOWE(R^tZ`KFAGAZe+=gH{gAa6);9Z??F@-#E0_3>jw_-`hnLewUm%TM%{ZH z4#iNf*XZi*X4A&?%fioe@Cb)Hw$*>>a< z+b;2X@>+%a2P-rxSEkU$f8vdSfqKWV^Xwk4F!HAA#qMJ#xbsctboC1N9-89bUwk*V zzJC0ToBT9i+c*7&>$&exh5O>qSz8xhRL@un3=)aNJ7i*Fg1`H3{|{gI;umRaZ;jdo zHgrG5F+x?LQmt_OOjq8Aqmd^&~gSP_C zlNp2eN)BXM2HvAMEWwpgL}*whdat3Z;K51IRu~@{=Fx{Ao7MYvHPMuT!IAr~J*A=U z&NhCD3f~w%f79XRpj2vMsjsoOrbt%gqjOrZlcVgv_HJ7uk+=$(oS0zCcfZU3_y@m_ z%RR;@WELwjqmo!Gmgrx!i0=M=%2U&*8muV*p6P0t{_E%S{`cKPYpE4q&B2GoT?i#p zRS*?2V^FW44i%;59VRw68q;V{Y^X1WeutS-gcUsS@WYS3XT^-pMbiZamtV`^T}E2? z{bfD;5;;FPKEl2$usB=W!pE=c;WGvd%F)RQ{{BRj=?fp4NF)*$#WPPm#mZHy_`v&b z2~tKXL2?+#ougngZdiN+o8JFEo`3Nrrppz4?r`K-qpV%GnuYWGQSb52A?h(=QOyxN z))v4!R6_tDs#sq~%^8gYMMF=52aIBE!$c|c8yGK)j*qc@ds1;Zo2Cj3t`*lrU!UUt z-q$EuRJmvW2`>K1&rvG4M9SrHzPf+>9i1bQNF*-A_cm{4!Q444x$y=>0rlbO9)t6q zww6{_uUW&;_&5Ut`|){>I+)wv&!+Y3DHVb!kw}nI4h@ZAB1_dF0$yEkMic`MGNTx= z_&9kW#vpjS0%E~LPlY(a_2A|$OioPB=54y_Xv(RCL?V$$%swN-!+hsE-(hrgjHpBu z()uEJAA*GbzPW5%zn+DQ7KP?Sv8-CXh8q^&=kD@(000pkNklpnQQu5cl@bz(L?Ur@p{o4sXD_g9 z#ZrFxU;Z*G%#1D~pz7!-wX*bv8@OrxI$BDtELymbJa>3k$B|*gVeJ`ZS4CrqK;x7H zF{3~Jm-P?ze{;)8nFf? zm?JgTZkN~S?r7uYP3yUN)B31;AlPC?W?}-sdvJ=VL+fFYZzvlYBjmo0bB>1e5$KFu zvNbl8DU5(qoXh#AZ~QaPxwpLL_cAFkNF)-8#Jf(VQsKY;U;mYf@rmGBV1lLr3Puc> zf&MwYbhfsjF306L7=u_tW-Mq37DS<74H5k+>opIedhNfA}!v^@qZ*KJ^6 z*Uoe58eA)5jfq4e(QG{N*b^*TxQJi=gpot<2nKMWZXoJ0>OjDo(2ihDlrR>H zZ>SzZXhM`Ync!p30iZb4E7lY-7>rSJRbGC1Cwq49nZ-MB_0yCYgO%$xuxj1L*}jY! 
[GIT binary patch data for image/catalog.png, image/name_format.png and image/result_demo.png omitted]
diff --git a/examples/natural_robustness/ocr_evaluate/对OCR模型CNN-CTC的鲁棒性评测.md b/examples/natural_robustness/ocr_evaluate/对OCR模型CNN-CTC的鲁棒性评测.md
new file mode 100644
index 0000000..97219ff
--- /dev/null
+++ b/examples/natural_robustness/ocr_evaluate/对OCR模型CNN-CTC的鲁棒性评测.md
@@ -0,0 +1,508 @@
+# Robustness Evaluation of the OCR Model CNN-CTC
+
+## Overview
+
+This tutorial demonstrates a simple robustness evaluation of the OCR model CNN-CTC using the natural perturbation serving service. We first use the serving service to generate datasets of naturally perturbed samples, and then assess the robustness of the CNN-CTC model from its performance on those datasets.
+
+## Environment Requirements
+
+- Hardware
+
+    - Set up the hardware environment with Ascend or GPU processors.
+
+- Dependencies
+
+    - [MindSpore](https://www.mindspore.cn/install)
+    - MindSpore-Serving=1.6.0
+    - MindArmour
+
+## Script Description
+
+### Code Structure
+
+```bash
+|-- natural_robustness
+    |-- serving                      # Serving service that generates naturally perturbed samples
+    |-- ocr_evaluate
+        |-- cnn_ctc                  # CNN-CTC model: training, inference, pre- and post-processing
+        |-- data                     # Stores the data for experiment analysis
+        |-- default_config.yaml      # Parameter configuration
+        |-- generate_adv_samples.py  # Generates naturally perturbed samples
+        |-- eval_and_save.py         # Runs CNN-CTC inference on perturbed samples and saves the results
+        |-- analyse.py               # Analyses the robustness of the CNN-CTC model
+```
+
+### Script Parameters
+
+Training, inference, and robustness-evaluation parameters are all configured in `default_config.yaml`. Here we focus on the parameters used during evaluation and those that must be set by the user; for the remaining parameters, see the [CNN-CTC tutorial](https://gitee.com/mindspore/models/tree/master/official/cv/cnnctc).
+
+Training parameters:
+
+- `--TRAIN_DATASET_PATH`: path of the training dataset.
+- `--TRAIN_DATASET_INDEX_PATH`: path of the training-dataset index file that determines the sample order.
+- `--SAVE_PATH`: path for saving model checkpoint files.
+
+Inference and evaluation parameters:
+
+- `--TEST_DATASET_PATH`: path of the test dataset.
+- `--CHECKPOINT_PATH`: path of the checkpoint file.
+- `--ADV_TEST_DATASET_PATH`: path of the perturbed-sample dataset.
+- `--IS_ADV`: whether to test with perturbed samples.
+
+### Model and Data
+
+Data processing and model training follow the [CNN-CTC tutorial](https://gitee.com/mindspore/models/tree/master/official/cv/cnnctc). The evaluation task requires the preprocessed dataset and the checkpoint file obtained through that tutorial.
+
+#### Model
+
+The model under evaluation is CNN-CTC, an OCR model implemented in MindSpore for scene text recognition: a CNN extracts the features, and CTC (Connectionist Temporal Classification) predicts the output sequence. For details and the implementation, see [CNN-CTC](https://gitee.com/mindspore/models/tree/master/official/cv/cnnctc).
+
+[Paper](https://arxiv.org/abs/1904.01906): J. Baek, G. Kim, J. Lee, S. Park, D. Han, S. Yun, S. J. Oh, and H. Lee, “What is wrong with scene text recognition model comparisons? dataset and model analysis,” ArXiv, vol. abs/1904.01906, 2019.
+
+#### Dataset
+
+Training datasets: [MJSynth](https://www.robots.ox.ac.uk/~vgg/data/text/) and [SynthText](https://github.com/ankush-me/SynthText)
+
+Test dataset: [The IIIT 5K-word dataset](https://cvit.iiit.ac.in/research/projects/cvit-projects/the-iiit-5k-word-dataset)
+
+##### Dataset Preprocessing
+
+- Step 1:
+
+    All datasets have been preprocessed and stored in .lmdb format; click [**here**](https://gitee.com/link?target=https%3A%2F%2Fdrive.google.com%2Fdrive%2Ffolders%2F192UfE9agQUMNq6AgU3_E05_FcPZK4hyt) to download them.
+
+- Step 2:
+
+    Unzip the downloaded files and rename the MJSynth dataset to MJ, the SynthText dataset to ST, and the IIIT dataset to IIIT.
+
+- Step 3:
+
+    Move the three datasets into the `cnnctc_data` folder, with the following structure:
+
+    ```
+    |--- CNNCTC/
+        |--- cnnctc_data/
+            |--- ST/
+                data.mdb
+                lock.mdb
+            |--- MJ/
+                data.mdb
+                lock.mdb
+            |--- IIIT/
+                data.mdb
+                lock.mdb
+            ......
+    ```
+
+- Step 4:
+
+    Preprocess the datasets:
+
+    ```bash
+    cd ocr_evaluate/cnn_ctc
+    python src/preprocess_dataset.py
+    ```
+
+    This takes about 75 minutes.
+
+    The preprocessed dataset is in .lmdb format and is stored as key-value pairs:
+
+    | key         | value                                  |
+    | ----------- | -------------------------------------- |
+    | label-%09d  | ground-truth label of the image        |
+    | image-%09d  | original image data                    |
+    | num-samples | number of samples in the lmdb dataset  |
+
+    `%09d` is a 9-digit, zero-padded number, e.g. label-000000001. (A read-back sketch is given after the training step below.)
+
+##### Model Training
+
+Train the CNN-CTC model to obtain the checkpoint file:
+
+```bash
+cd ocr_evaluate/cnn_ctc
+bash scripts/run_standalone_train_gpu.sh
+```
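+
+For a quick sanity check, the preprocessed .lmdb dataset can be read back with a few lines of Python. The sketch below is illustrative only (the dataset path is an assumption) and mirrors the lmdb calls used by the scripts in this example:
+
+```python
+import lmdb
+
+# Open the preprocessed dataset read-only (path is illustrative).
+env = lmdb.open("cnnctc_data/IIIT", max_readers=32, readonly=True, lock=False,
+                readahead=False, meminit=False)
+with env.begin(write=False) as txn:
+    n_samples = int(txn.get('num-samples'.encode()))
+    index = 1  # lmdb indices start at 1
+    label = txn.get('label-%09d'.encode() % index).decode('utf-8')
+    imgbuf = txn.get('image-%09d'.encode() % index)  # raw encoded image bytes
+    print(n_samples, label, len(imgbuf))
+env.close()
+```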
+
+### Generating the Evaluation Dataset with the Natural Perturbation Serving Service
+
+1. Start the natural perturbation serving service. For details, see [natural perturbation serving service](https://gitee.com/mindspore/mindarmour/blob/master/examples/natural_robustness/serving/README.md):
+
+    ```bash
+    cd serving/server/
+    python serving_server.py
+    ```
+
+2. Generate the evaluation dataset based on the serving service.
+
+    1. In default_config.yaml, configure the path of the original test samples, `TEST_DATASET_PATH`, and the path of the generated perturbed-sample dataset, `ADV_TEST_DATASET_PATH`. For example:
+
+        ```yaml
+        TEST_DATASET_PATH: "/opt/dataset/CNNCTC_data/MJ-ST-IIIT/IIIT5k_3000"
+        ADV_TEST_DATASET_PATH: "/home/mindarmour/examples/natural_robustness/ocr_evaluate/data"
+        ```
+
+    2. Core code walkthrough:
+
+        1. Configure the perturbation methods. For the currently available methods and their parameters, see [image transform methods](https://gitee.com/mindspore/mindarmour/tree/master/mindarmour/natural_robustness/transform/image). An example configuration:
+
+            ```python
+            perturb_config = [
+                {"method": "Contrast", "params": {"alpha": 1.5, "beta": 0}},
+                {"method": "GaussianBlur", "params": {"ksize": 5}},
+                {"method": "SaltAndPepperNoise", "params": {"factor": 0.05}},
+                {"method": "Translate", "params": {"x_bias": 0.1, "y_bias": -0.1}},
+                {"method": "Scale", "params": {"factor_x": 0.8, "factor_y": 0.8}},
+                {"method": "Shear", "params": {"factor": 1.5, "direction": "horizontal"}},
+                {"method": "Rotate", "params": {"angle": 30}},
+                {"method": "MotionBlur", "params": {"degree": 5, "angle": 45}},
+                {"method": "GradientBlur", "params": {"point": [50, 100], "kernel_num": 3, "center": True}},
+                {"method": "GradientLuminance", "params": {"color_start": [255, 255, 255], "color_end": [0, 0, 0], "start_point": [100, 150], "scope": 0.3, "bright_rate": 0.3, "pattern": "light", "mode": "circle"}},
+                {"method": "GradientLuminance", "params": {"color_start": [255, 255, 255], "color_end": [0, 0, 0], "start_point": [150, 200], "scope": 0.3, "pattern": "light", "mode": "horizontal"}},
+                {"method": "GradientLuminance", "params": {"color_start": [255, 255, 255], "color_end": [0, 0, 0], "start_point": [150, 200], "scope": 0.3, "pattern": "light", "mode": "vertical"}},
+                {"method": "Curve", "params": {"curves": 0.5, "depth": 3, "mode": "vertical"}},
+                {"method": "Perspective", "params": {"ori_pos": [[0, 0], [0, 800], [800, 0], [800, 800]], "dst_pos": [[10, 0], [0, 800], [790, 0], [800, 800]]}},
+            ]
+            ```
+
+        2. Prepare the data to perturb (a decoding sanity check follows the code):
+
+            ```python
+            instances = []
+            methods_number = 1
+            outputs_number = 2
+            perturb_config = json.dumps(perturb_config)
+
+            env = lmdb.open(lmdb_paths, max_readers=32, readonly=True, lock=False, readahead=False, meminit=False)
+
+            if not env:
+                print('cannot create lmdb from %s' % (lmdb_paths))
+                sys.exit(0)
+            with env.begin(write=False) as txn:
+                n_samples = int(txn.get('num-samples'.encode()))
+
+                # Filter out labels that are too long or contain illegal characters.
+                filtered_labels = []
+                filtered_index_list = []
+                for index in range(n_samples):
+                    index += 1  # lmdb starts with 1
+                    label_key = 'label-%09d'.encode() % index
+                    label = txn.get(label_key).decode('utf-8')
+
+                    if len(label) > max_len: continue
+                    illegal_sample = False
+                    for char_item in label.lower():
+                        if char_item not in config.CHARACTER:
+                            illegal_sample = True
+                            break
+                    if illegal_sample: continue
+                    filtered_labels.append(label)
+                    filtered_index_list.append(index)
+                    img_key = 'image-%09d'.encode() % index
+                    imgbuf = txn.get(img_key)
+                    instances.append({"img": imgbuf, 'perturb_config': perturb_config, "methods_number": methods_number,
+                                      "outputs_number": outputs_number})
+
+            print(f'num of samples in IIIT dataset: {len(filtered_index_list)}')
+            ```
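+
+            As an optional check (a sketch, not part of the example scripts; it assumes PIL is installed and that the stored values are encoded image bytes), the prepared instances can be decoded before being sent to the serving service:
+
+            ```python
+            import io
+            from PIL import Image
+
+            # Decode the first prepared instance to confirm the bytes form a valid image.
+            img = Image.open(io.BytesIO(instances[0]["img"]))
+            print(img.size, img.mode)
+            ```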
+
+        3. Request the natural perturbation serving service and save the data it returns:
+
+            ```python
+            client = Client("10.113.216.54:5500", "perturbation", "natural_perturbation")
+            start_time = time.time()
+            result = client.infer(instances)
+            end_time = time.time()
+            print('generated natural perturbs images cost: ', end_time - start_time)
+            env_save = lmdb.open(lmdb_save_path, map_size=1099511627776)
+
+            txn = env.begin(write=False)
+            with env_save.begin(write=True) as txn_save:
+                new_index = 1
+                for i, index in enumerate(filtered_index_list):
+                    try:
+                        file_names = result[i]['file_names'].split(';')
+                    except KeyError:
+                        print('index: ', index)
+                        print(result[i])
+                    length = result[i]['file_length'].tolist()
+                    before = 0
+                    label = filtered_labels[i]
+                    label = label.encode()
+                    img_key = 'image-%09d'.encode() % index
+                    ori_img = txn.get(img_key)
+                    names_dict = result[i]['names_dict']
+                    names_dict = json.loads(names_dict)
+                    for name, leng in zip(file_names, length):
+                        label_key = 'label-%09d'.encode() % new_index
+                        txn_save.put(label_key, label)
+                        img_key = 'image-%09d'.encode() % new_index
+                        adv_img = result[i]['results']
+                        adv_img = adv_img[before:before + leng]
+                        adv_img_key = 'adv_image-%09d'.encode() % new_index
+                        txn_save.put(img_key, ori_img)
+                        txn_save.put(adv_img_key, adv_img)
+
+                        adv_info_key = 'adv_info-%09d'.encode() % new_index
+                        adv_info = json.dumps(names_dict[name]).encode()
+                        txn_save.put(adv_info_key, adv_info)
+                        before = before + leng
+                        new_index += 1
+                txn_save.put("num-samples".encode(), str(new_index - 1).encode())
+            env.close()
+            ```
+
+3. Run the script that generates the naturally perturbed samples:
+
+    ```bash
+    python generate_adv_samples.py
+    ```
+
+4. The generated natural-perturbation dataset is in .lmdb format and contains the following entries (a read-back sketch follows this list):
+
+    | key            | value                                                   |
+    | -------------- | ------------------------------------------------------- |
+    | label-%09d     | ground-truth label of the image                         |
+    | image-%09d     | original image data                                     |
+    | adv_image-%09d | generated perturbed image data                          |
+    | adv_info-%09d  | perturbation info, including the method and parameters  |
+    | num-samples    | number of samples in the lmdb dataset                   |
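+
+A minimal sketch (the path is illustrative) of how these entries can be walked, for example to spot-check the first few generated samples:
+
+```python
+import json
+
+import lmdb
+
+# Open the generated dataset read-only; use your ADV_TEST_DATASET_PATH here.
+env = lmdb.open("/home/mindarmour/examples/natural_robustness/ocr_evaluate/data",
+                max_readers=32, readonly=True, lock=False, readahead=False, meminit=False)
+with env.begin(write=False) as txn:
+    n_samples = int(txn.get('num-samples'.encode()))
+    for index in range(1, min(n_samples, 3) + 1):
+        label = txn.get('label-%09d'.encode() % index).decode('utf-8')
+        adv_img = txn.get('adv_image-%09d'.encode() % index)  # perturbed image bytes
+        adv_info = json.loads(txn.get('adv_info-%09d'.encode() % index).decode('utf-8'))
+        print(index, label, len(adv_img), adv_info)
+env.close()
+```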
+
+### Running CNN-CTC Inference on the Generated Perturbation Dataset
+
+1. In default_config.yaml, set the test-dataset path `TEST_DATASET_PATH` to the same value as the perturbed-dataset path `ADV_TEST_DATASET_PATH`. For example:
+
+    ```yaml
+    TEST_DATASET_PATH: "/home/mindarmour/examples/natural_robustness/ocr_evaluate/data"
+    ADV_TEST_DATASET_PATH: "/home/mindarmour/examples/natural_robustness/ocr_evaluate/data"
+    ```
+
+2. Core script walkthrough:
+
+    1. Load the model and the dataset:
+
+        ```python
+        ds = test_dataset_creator(is_adv=config.IS_ADV)
+        net = CNNCTC(config.NUM_CLASS, config.HIDDEN_SIZE, config.FINAL_FEATURE_WIDTH)
+
+        ckpt_path = config.CHECKPOINT_PATH
+        param_dict = load_checkpoint(ckpt_path)
+        load_param_into_net(net, param_dict)
+        print('parameters loaded! from: ', ckpt_path)
+        ```
+
+    2. Run inference and save the model's predictions for both the original and the perturbed samples:
+
+        ```python
+        env_save = lmdb.open(lmdb_save_path, map_size=1099511627776)
+        correct_count = 0
+        count = 0
+        with env_save.begin(write=True) as txn_save:
+            for data in ds.create_tuple_iterator():
+                img, _, text, _, length = data
+
+                img_tensor = Tensor(img, mstype.float32)
+                model_predict = net(img_tensor)
+                model_predict = np.squeeze(model_predict.asnumpy())
+
+                preds_size = np.array([model_predict.shape[1]] * config.TEST_BATCH_SIZE)
+                preds_index = np.argmax(model_predict, 2)
+                preds_index = np.reshape(preds_index, [-1])
+                preds_str = converter.decode(preds_index, preds_size)
+                label_str = converter.reverse_encode(text.asnumpy(), length.asnumpy())
+
+                print("Prediction samples: \n", preds_str[:5])
+                print("Ground truth: \n", label_str[:5])
+                for pred, label in zip(preds_str, label_str):
+                    if pred == label:
+                        correct_count += 1
+                    count += 1
+                    if config.IS_ADV:
+                        pred_key = 'adv_pred-%09d'.encode() % count
+                    else:
+                        pred_key = 'pred-%09d'.encode() % count
+
+                    txn_save.put(pred_key, pred.encode())
+        accuracy = correct_count / count
+        ```
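+
+        `converter` above is the CTC label converter that ships with the model sources. For intuition only, the following sketch shows the greedy decoding idea behind `converter.decode` (merge consecutive repeats, then drop the blank symbol; the blank index and the exact interface are assumptions, the converter in the model code is authoritative):
+
+        ```python
+        def greedy_ctc_decode(preds_index, preds_size, character, blank=0):
+            """Greedy CTC decoding: collapse consecutive repeats, then remove blanks."""
+            texts = []
+            start = 0
+            for size in preds_size:
+                seq = preds_index[start:start + size]
+                chars = [character[idx] for i, idx in enumerate(seq)
+                         if idx != blank and (i == 0 or idx != seq[i - 1])]
+                texts.append(''.join(chars))
+                start += size
+            return texts
+        ```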
+
+3. Run the eval_and_save.py script:
+
+    ```bash
+    python eval_and_save.py
+    ```
+
+    The CNN-CTC model runs inference on the generated natural-perturbation dataset, and its prediction for every sample is saved under `ADV_TEST_DATASET_PATH`.
+
+    New entries added to the dataset:
+
+    | Key           | Value                                          |
+    | ------------- | ---------------------------------------------- |
+    | pred-%09d     | model prediction for the original image data   |
+    | adv_pred-%09d | model prediction for the perturbed image data  |
+
+    Model predictions on the original samples:
+
+    ```bash
+    Prediction samples:
+     ['private', 'private', 'parking', 'parking', 'salutes']
+    Ground truth:
+     ['private', 'private', 'parking', 'parking', 'salutes']
+    Prediction samples:
+     ['venus', 'venus', 'its', 'its', 'the']
+    Ground truth:
+     ['venus', 'venus', 'its', 'its', 'the']
+    Prediction samples:
+     ['summer', 'summer', 'joeys', 'joeys', 'think']
+    Ground truth:
+     ['summer', 'summer', 'joes', 'joes', 'think']
+    ...
+    ```
+
+    Model predictions on the naturally perturbed samples:
+
+    ```bash
+    Prediction samples:
+     ['private', 'private', 'parking', 'parking', 'salutes']
+    Ground truth:
+     ['private', 'private', 'parking', 'parking', 'salutes']
+    Prediction samples:
+     ['dams', 'vares', 'its', 'its', 'the']
+    Ground truth:
+     ['venus', 'venus', 'its', 'its', 'the']
+    Prediction samples:
+     ['sune', 'summer', '', 'joeys', 'think']
+    Ground truth:
+     ['summer', 'summer', 'joes', 'joes', 'think']
+    ...
+    ```
+
+    Accuracy on the original test dataset and on the natural-perturbation dataset:
+
+    ```bash
+    num of samples in IIIT dataset: 5952
+    Accuracy of benign sample:  0.8546195652173914
+    Accuracy of perturbed sample:  0.6126019021739131
+    ```
+
+### Robustness Analysis
+
+Statistics are computed from how the CNN-CTC model performs on the perturbation dataset. Run the analyse.py script:
+
+```bash
+python analyse.py
+```
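+
+In essence, analyse.py compares, for every sample, the prediction on the original image (`pred-%09d`) with the prediction on the perturbed image (`adv_pred-%09d`), and groups the outcomes by the perturbation recorded in `adv_info-%09d`. The sketch below shows this kind of bookkeeping in a simplified form (the path is illustrative, the raw adv_info string is used as the bucket key, and the real script additionally exports the misclassified images):
+
+```python
+from collections import defaultdict
+
+import lmdb
+
+env = lmdb.open("/home/mindarmour/examples/natural_robustness/ocr_evaluate/data",
+                max_readers=32, readonly=True, lock=False, readahead=False, meminit=False)
+wrong_after_perturb = defaultdict(int)  # perturbation info -> number of wrong adv predictions
+with env.begin(write=False) as txn:
+    n_samples = int(txn.get('num-samples'.encode()))
+    for index in range(1, n_samples + 1):
+        label = txn.get('label-%09d'.encode() % index).decode('utf-8')
+        adv_pred = txn.get('adv_pred-%09d'.encode() % index).decode('utf-8')
+        adv_info = txn.get('adv_info-%09d'.encode() % index).decode('utf-8')
+        if adv_pred != label:
+            wrong_after_perturb[adv_info] += 1
+env.close()
+```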
+
+Analysis results:
+
+```bash
+Number of samples in analyse dataset: 5952
+Accuracy of original dataset: 0.46127717391304346
+Accuracy of adversarial dataset: 0.6126019021739131
+Number of samples correctly predicted in original dataset but wrong in adversarial dataset: 832
+Number of samples both wrong predicted in original and adversarial dataset: 1449
+------------------------------------------------------------------------------
+Method Shear
+Number of perturb samples: 442
+Number of wrong predicted: 351
+Number of correctly predicted in origin dataset but wrong in adversarial: 153
+Number of both wrong predicted in origin and adversarial dataset: 198
+------------------------------------------------------------------------------
+Method Contrast
+Number of perturb samples: 387
+Number of wrong predicted: 57
+Number of correctly predicted in origin dataset but wrong in adversarial: 8
+Number of both wrong predicted in origin and adversarial dataset: 49
+------------------------------------------------------------------------------
+Method GaussianBlur
+Number of perturb samples: 436
+Number of wrong predicted: 181
+Number of correctly predicted in origin dataset but wrong in adversarial: 71
+Number of both wrong predicted in origin and adversarial dataset: 110
+------------------------------------------------------------------------------
+Method MotionBlur
+Number of perturb samples: 458
+Number of wrong predicted: 215
+Number of correctly predicted in origin dataset but wrong in adversarial: 92
+Number of both wrong predicted in origin and adversarial dataset: 123
+------------------------------------------------------------------------------
+Method GradientLuminance
+Number of perturb samples: 1243
+Number of wrong predicted: 154
+Number of correctly predicted in origin dataset but wrong in adversarial: 4
+Number of both wrong predicted in origin and adversarial dataset: 150
+------------------------------------------------------------------------------
+Method Rotate
+Number of perturb samples: 405
+Number of wrong predicted: 298
+Number of correctly predicted in origin dataset but wrong in adversarial: 136
+Number of both wrong predicted in origin and adversarial dataset: 162
+------------------------------------------------------------------------------
+Method SaltAndPepperNoise
+Number of perturb samples: 413
+Number of wrong predicted: 116
+Number of correctly predicted in origin dataset but wrong in adversarial: 29
+Number of both wrong predicted in origin and adversarial dataset: 87
+------------------------------------------------------------------------------
+Method Translate
+Number of perturb samples: 419
+Number of wrong predicted: 159
+Number of correctly predicted in origin dataset but wrong in adversarial: 57
+Number of both wrong predicted in origin and adversarial dataset: 102
+------------------------------------------------------------------------------
+Method GradientBlur
+Number of perturb samples: 440
+Number of wrong predicted: 92
+Number of correctly predicted in origin dataset but wrong in adversarial: 26
+Number of both wrong predicted in origin and adversarial dataset: 66
+------------------------------------------------------------------------------
+Method Perspective
+Number of perturb samples: 401
+Number of wrong predicted: 181
+Number of correctly predicted in origin dataset but wrong in adversarial: 75
+Number of both wrong predicted in origin and adversarial dataset: 106
+------------------------------------------------------------------------------
+Method Curve
+Number of perturb samples: 410
+Number of wrong predicted: 361
+Number of correctly predicted in origin dataset but wrong in adversarial: 162
+Number of both wrong predicted in origin and adversarial dataset: 199
+------------------------------------------------------------------------------
+Method Scale
+Number of perturb samples: 434
+Number of wrong predicted: 116
+Number of correctly predicted in origin dataset but wrong in adversarial: 19
+Number of both wrong predicted in origin and adversarial dataset: 97
+------------------------------------------------------------------------------
+```
+
+The analysis results include:
+
+1. Number of samples evaluated: 5888.
+2. Accuracy of the CNN-CTC model on the original dataset: 85.4%.
+3. Accuracy of the CNN-CTC model on the perturbation dataset: 57.2%.
+4. Number of samples predicted correctly on the original image but incorrectly after perturbation: 1736.
+5. Number of samples predicted incorrectly on both the original and the perturbed image: 782.
+6. For every perturbation method: the number of samples, the number of perturbed samples predicted incorrectly, the number predicted correctly on the original image but incorrectly after perturbation, and the number predicted incorrectly on both.
+
+A high prediction error rate on images perturbed by a given method indicates that the CNN-CTC model is not robust against that method and targeted improvement is advisable. For the Rotate, Curve, MotionBlur, and Shear methods, most of the perturbed images are predicted incorrectly, so further analysis of these methods is recommended.
+
+Three folders are also generated under `ADV_TEST_DATASET_PATH`:
+
+```
+adv_wrong_pred             # samples misclassified after perturbation
+ori_corret_adv_wrong_pred  # samples classified correctly as originals but misclassified after perturbation
+ori_wrong_adv_wrong_pred   # samples misclassified both as originals and after perturbation
+```
+
+Each folder is organized by perturbation method:
+
+![1646730529400](image/catalog.png)
+
+Each image is named as `ground truth-prediction.png`, as shown below:
+
+![1646812837049](image/name_format.png)
+
+The saved images support further analysis of whether a wrong prediction is caused by model quality, by image quality, or by the perturbation method altering the semantics of the image.
+
+![1646812837049](image/result_demo.png)
\ No newline at end of file