
test_axolotl.py

def generate_metrics():
    from d3m.metadata.problem import PerformanceMetric
    metrics = [{'metric': PerformanceMetric.F1, 'params': {'pos_label': '1'}},
              ]
    return metrics

def generate_data_preparation_params():
    from axolotl.utils import schemas as schemas_utils
    data_preparation_params = schemas_utils.DATA_PREPARATION_PARAMS['no_split']
    return data_preparation_params

def generate_scoring_pipeline():
    from axolotl.utils import schemas as schemas_utils
    scoring_pipeline = schemas_utils.get_scoring_pipeline()
    return scoring_pipeline

def generate_data_preparation_pipeline():
    from axolotl.utils import schemas as schemas_utils
    data_preparation_pipeline = schemas_utils.get_splitting_pipeline("TRAINING_DATA")
    return data_preparation_pipeline

def generate_dataset_problems(dataset_infos):
    """
    Args:
        dataset_infos: A list of dataset infos, each including `path` and `target`

    Returns:
        A list of (Dataset, Problem) pairs
    """
    import pandas as pd
    from axolotl.utils import data_problem
    from d3m.metadata.problem import TaskKeyword, PerformanceMetric

    dataset_problems = []
    for dataset_info in dataset_infos:
        table_path = dataset_info['path']
        target = dataset_info['target']
        df = pd.read_csv(table_path)
        dataset, problem_description = data_problem.generate_dataset_problem(df,
                                                                             target_index=target,
                                                                             task_keywords=[TaskKeyword.ANOMALY_DETECTION,],
                                                                             performance_metrics=[{'metric': PerformanceMetric.F1}])
        dataset_problems.append((dataset, problem_description))
    return dataset_problems

# FIXME: Currently only considers the detection algorithm
def generate_pipelines(primitive_python_paths):
    """
    Args:
        primitive_python_paths: a list of primitive Python paths for algorithms

    Returns:
        a list of pipeline descriptions
    """
    from d3m import index
    from d3m.metadata.base import ArgumentType
    from d3m.metadata.pipeline import Pipeline, PrimitiveStep
    from axolotl.utils import pipeline as pipeline_utils

    pipelines = []
    for primitive_python_path in primitive_python_paths:
        # Creating pipeline
        pipeline_description = Pipeline()
        pipeline_description.add_input(name='inputs')

        # The first three steps are fixed
        # Step 0: dataset_to_dataframe
        step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.dataset_to_dataframe.Common'))
        step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
        step_0.add_output('produce')
        pipeline_description.add_step(step_0)

        # Step 1: column_parser
        step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.column_parser.Common'))
        step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
        step_1.add_output('produce')
        pipeline_description.add_step(step_1)

        # Step 2: extract_columns_by_semantic_types(attributes)
        step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common'))
        step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
        step_2.add_output('produce')
        step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                                  data=['https://metadata.datadrivendiscovery.org/types/Attribute'])
        pipeline_description.add_step(step_2)

        # Step 3: extract_columns_by_semantic_types(targets)
        step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common'))
        step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
        step_3.add_output('produce')
        step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                                  data=['https://metadata.datadrivendiscovery.org/types/TrueTarget'])
        pipeline_description.add_step(step_3)

        attributes = 'steps.2.produce'
        targets = 'steps.3.produce'

        # Step 4: the primitive under test
        test_step = PrimitiveStep(primitive=index.get_primitive(primitive_python_path))
        test_step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=attributes)
        test_step.add_output('produce')
        pipeline_description.add_step(test_step)

        # Step 5: finalize the pipeline with construct_predictions
        final_step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.construct_predictions.Common'))
        final_step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.4.produce')
        final_step.add_argument(name='reference', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
        final_step.add_output('produce')
        pipeline_description.add_step(final_step)

        pipeline_description.add_output(name='output predictions', data_reference='steps.5.produce')
        pipelines.append(pipeline_description)
    return pipelines

def test():
    # Datasets to be tested
    dataset_infos = [
        {
            'path': 'datasets/anomaly/yahoo_sub_5/yahoo_sub_5_dataset/tables/learningData.csv',
            'target': 7
        },
        {
            'path': 'datasets/anomaly/kpi/kpi_dataset/tables/learningData.csv',
            # 'path': 'datasets/anomaly/kpi/TRAIN/dataset_TRAIN/tables/learningData.csv',
            'target': 3
        },
    ]

    # Algorithms to be tested
    # FIXME: Test more primitives
    primitive_python_paths = [
        'd3m.primitives.tods.detection_algorithm.pyod_ae',
        'd3m.primitives.tods.detection_algorithm.pyod_vae',
        'd3m.primitives.tods.detection_algorithm.pyod_cof',
        'd3m.primitives.tods.detection_algorithm.pyod_sod',
        'd3m.primitives.tods.detection_algorithm.pyod_abod',
        'd3m.primitives.tods.detection_algorithm.pyod_hbos',
        'd3m.primitives.tods.detection_algorithm.pyod_iforest',
        'd3m.primitives.tods.detection_algorithm.pyod_lof',
        'd3m.primitives.tods.detection_algorithm.pyod_knn',
        'd3m.primitives.tods.detection_algorithm.pyod_ocsvm',
        'd3m.primitives.tods.detection_algorithm.pyod_loda',
        # 'd3m.primitives.tods.detection_algorithm.pyod_cblof',
        'd3m.primitives.tods.detection_algorithm.pyod_sogaal',
        'd3m.primitives.tods.detection_algorithm.pyod_mogaal',
    ]

    dataset_problems = generate_dataset_problems(dataset_infos)
    pipelines = generate_pipelines(primitive_python_paths)
    metrics = generate_metrics()
    data_preparation_pipeline = generate_data_preparation_pipeline()
    scoring_pipeline = generate_scoring_pipeline()
    data_preparation_params = generate_data_preparation_params()

    # Start running
    from axolotl.backend.simple import SimpleRunner
    backend = SimpleRunner(random_seed=0)
    for i, dataset_problem in enumerate(dataset_problems):
        dataset, problem_description = dataset_problem
        for j, pipeline in enumerate(pipelines):
            print('Dataset:', i, 'Pipeline:', j)
            pipeline_result = backend.evaluate_pipeline(problem_description=problem_description,
                                                        pipeline=pipeline,
                                                        input_data=[dataset],
                                                        metrics=metrics,
                                                        data_preparation_pipeline=data_preparation_pipeline,
                                                        scoring_pipeline=scoring_pipeline,
                                                        data_preparation_params=data_preparation_params)
            print('Results')
            print('----------------------------')
            print(pipeline_result)
            print('----------------------------')
            if pipeline_result.status == 'ERRORED':
                print('Scoring pipeline is {}'.format(scoring_pipeline.id))
                print('Data preparation pipeline is {}'.format(data_preparation_pipeline.id))
                raise ValueError('ERRORED for dataset {}, primitive {}'.format(dataset_infos[i], primitive_python_paths[j]))

if __name__ == "__main__":
    test()
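
Running the script directly sweeps every listed primitive over both datasets. When debugging a single ERRORED case, a narrower run is often more convenient; the following minimal sketch (assuming the TODS primitives are installed and the `datasets/anomaly/` files exist locally) reuses the helpers above to evaluate one primitive on one dataset:

from axolotl.backend.simple import SimpleRunner

# Build one (dataset, problem) pair and one pipeline via the helpers above.
dataset, problem_description = generate_dataset_problems([
    {'path': 'datasets/anomaly/yahoo_sub_5/yahoo_sub_5_dataset/tables/learningData.csv',
     'target': 7},
])[0]
pipeline = generate_pipelines(['d3m.primitives.tods.detection_algorithm.pyod_iforest'])[0]

# Evaluate it exactly as test() does, but for a single combination.
backend = SimpleRunner(random_seed=0)
pipeline_result = backend.evaluate_pipeline(problem_description=problem_description,
                                            pipeline=pipeline,
                                            input_data=[dataset],
                                            metrics=generate_metrics(),
                                            data_preparation_pipeline=generate_data_preparation_pipeline(),
                                            scoring_pipeline=generate_scoring_pipeline(),
                                            data_preparation_params=generate_data_preparation_params())
print(pipeline_result)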

TODS is a full-stack automated machine learning system, focused on anomaly detection for multivariate time-series data. TODS provides comprehensive modules for building machine-learning-based anomaly detection systems, including: data processing, time series processing, feature analysis, detection algorithms, and a reinforcement module. The functionality provided by these modules includes common data preprocessing, smoothing and transformation of time series, feature extraction from the time and frequency domains, and a wide variety of detection algorithms.
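
The sketch below shows one way to see how those module families surface as d3m primitive namespaces. It is hedged: only the `detection_algorithm` family is confirmed by the paths tested above; the other family names are assumptions inferred from the module list, and it assumes the tods package is installed so that `d3m.index.search()` can enumerate its registered primitive paths:

from collections import defaultdict
from d3m import index

# Group registered TODS primitive paths by their namespace segment, e.g.
# 'd3m.primitives.tods.detection_algorithm.pyod_iforest' -> 'detection_algorithm'.
families = defaultdict(list)
for python_path in index.search():
    if python_path.startswith('d3m.primitives.tods.'):
        family = python_path.split('.')[3]
        families[family].append(python_path)

for family in sorted(families):
    print(family)
    for python_path in sorted(families[family]):
        print('   ', python_path)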